diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci index 840727fc75dc..6c5ca95a9410 100644 --- a/Documentation/ABI/testing/sysfs-bus-pci +++ b/Documentation/ABI/testing/sysfs-bus-pci @@ -163,6 +163,17 @@ Description: will be present in sysfs. Writing 1 to this file will perform reset. +What: /sys/bus/pci/devices/.../reset_subordinate +Date: October 2024 +Contact: linux-pci@vger.kernel.org +Description: + This is visible only for bridge devices. If you want to reset + all devices attached through the subordinate bus of a specific + bridge device, writing 1 to this will try to do it. This will + affect all devices attached to the system through this bridge + similar to writing 1 to their individual "reset" file, so use + with caution. + What: /sys/bus/pci/devices/.../vpd Date: February 2008 Contact: Ben Hutchings diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index 78c26280c473..1468609052c7 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -514,6 +514,7 @@ Description: information about CPUs heterogeneity. What: /sys/devices/system/cpu/vulnerabilities /sys/devices/system/cpu/vulnerabilities/gather_data_sampling + /sys/devices/system/cpu/vulnerabilities/indirect_target_selection /sys/devices/system/cpu/vulnerabilities/itlb_multihit /sys/devices/system/cpu/vulnerabilities/l1tf /sys/devices/system/cpu/vulnerabilities/mds diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs index 483639fb727b..a411ce2c7501 100644 --- a/Documentation/ABI/testing/sysfs-fs-f2fs +++ b/Documentation/ABI/testing/sysfs-fs-f2fs @@ -303,10 +303,13 @@ Description: Do background GC aggressively when set. Set to 0 by default. GC approach and turns SSR mode on. gc urgent low(2): lowers the bar of checking I/O idling in order to process outstanding discard commands and GC a - little bit aggressively. uses cost benefit GC approach. + little bit aggressively. always uses cost benefit GC approach, + and will override age-threshold GC approach if ATGC is enabled + at the same time. gc urgent mid(3): does GC forcibly in a period of given gc_urgent_sleep_time and executes a mid level of I/O idling check. - uses cost benefit GC approach. + always uses cost benefit GC approach, and will override + age-threshold GC approach if ATGC is enabled at the same time. What: /sys/fs/f2fs//gc_urgent_sleep_time Date: August 2017 diff --git a/Documentation/RCU/stallwarn.rst b/Documentation/RCU/stallwarn.rst index e38c587067fc..397e067db566 100644 --- a/Documentation/RCU/stallwarn.rst +++ b/Documentation/RCU/stallwarn.rst @@ -243,7 +243,7 @@ ticks this GP)" indicates that this CPU has not taken any scheduling-clock interrupts during the current stalled grace period. The "idle=" portion of the message prints the dyntick-idle state. -The hex number before the first "/" is the low-order 12 bits of the +The hex number before the first "/" is the low-order 16 bits of the dynticks counter, which will have an even-numbered value if the CPU is in dyntick-idle mode and an odd-numbered value otherwise.
The hex number between the two "/"s is the value of the nesting, which will be diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index 3e4a14e38b49..dc69ba0b05e4 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -22,3 +22,4 @@ are configurable at compile, boot or run time. gather_data_sampling.rst srso reg-file-data-sampling + indirect-target-selection diff --git a/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst b/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst new file mode 100644 index 000000000000..4788e14ebce0 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/indirect-target-selection.rst @@ -0,0 +1,156 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Indirect Target Selection (ITS) +=============================== + +ITS is a vulnerability in some Intel CPUs that support Enhanced IBRS and were +released before Alder Lake. ITS may allow an attacker to control the prediction +of indirect branches and RETs located in the lower half of a cacheline. + +ITS is assigned CVE-2024-28956 with a CVSS score of 4.7 (Medium). + +Scope of Impact +--------------- +- **eIBRS Guest/Host Isolation**: Indirect branches in KVM/kernel may still be + predicted with unintended target corresponding to a branch in the guest. + +- **Intra-Mode BTI**: In-kernel training such as through cBPF or other native + gadgets. + +- **Indirect Branch Prediction Barrier (IBPB)**: After an IBPB, indirect + branches may still be predicted with targets corresponding to direct branches + executed prior to the IBPB. This is fixed by the IPU 2025.1 microcode, which + should be available via distro updates. Alternatively microcode can be + obtained from Intel's github repository [#f1]_. + +Affected CPUs +------------- +Below is the list of ITS affected CPUs [#f2]_ [#f3]_: + + ======================== ============ ==================== =============== + Common name Family_Model eIBRS Intra-mode BTI + Guest/Host Isolation + ======================== ============ ==================== =============== + SKYLAKE_X (step >= 6) 06_55H Affected Affected + ICELAKE_X 06_6AH Not affected Affected + ICELAKE_D 06_6CH Not affected Affected + ICELAKE_L 06_7EH Not affected Affected + TIGERLAKE_L 06_8CH Not affected Affected + TIGERLAKE 06_8DH Not affected Affected + KABYLAKE_L (step >= 12) 06_8EH Affected Affected + KABYLAKE (step >= 13) 06_9EH Affected Affected + COMETLAKE 06_A5H Affected Affected + COMETLAKE_L 06_A6H Affected Affected + ROCKETLAKE 06_A7H Not affected Affected + ======================== ============ ==================== =============== + +- All affected CPUs enumerate Enhanced IBRS feature. +- IBPB isolation is affected on all ITS affected CPUs, and need a microcode + update for mitigation. +- None of the affected CPUs enumerate BHI_CTRL which was introduced in Golden + Cove (Alder Lake and Sapphire Rapids). This can help guests to determine the + host's affected status. +- Intel Atom CPUs are not affected by ITS. + +Mitigation +---------- +As only the indirect branches and RETs that have their last byte of instruction +in the lower half of the cacheline are vulnerable to ITS, the basic idea behind +the mitigation is to not allow indirect branches in the lower half. + +This is achieved by relying on existing retpoline support in the kernel, and in +compilers. ITS-vulnerable retpoline sites are runtime patched to point to newly +added ITS-safe thunks. 
These safe thunks consists of indirect branch in the +second half of the cacheline. Not all retpoline sites are patched to thunks, if +a retpoline site is evaluated to be ITS-safe, it is replaced with an inline +indirect branch. + +Dynamic thunks +~~~~~~~~~~~~~~ +From a dynamically allocated pool of safe-thunks, each vulnerable site is +replaced with a new thunk, such that they get a unique address. This could +improve the branch prediction accuracy. Also, it is a defense-in-depth measure +against aliasing. + +Note, for simplicity, indirect branches in eBPF programs are always replaced +with a jump to a static thunk in __x86_indirect_its_thunk_array. If required, +in future this can be changed to use dynamic thunks. + +All vulnerable RETs are replaced with a static thunk, they do not use dynamic +thunks. This is because RETs get their prediction from RSB mostly that does not +depend on source address. RETs that underflow RSB may benefit from dynamic +thunks. But, RETs significantly outnumber indirect branches, and any benefit +from a unique source address could be outweighed by the increased icache +footprint and iTLB pressure. + +Retpoline +~~~~~~~~~ +Retpoline sequence also mitigates ITS-unsafe indirect branches. For this +reason, when retpoline is enabled, ITS mitigation only relocates the RETs to +safe thunks. Unless user requested the RSB-stuffing mitigation. + +Mitigation in guests +^^^^^^^^^^^^^^^^^^^^ +All guests deploy ITS mitigation by default, irrespective of eIBRS enumeration +and Family/Model of the guest. This is because eIBRS feature could be hidden +from a guest. One exception to this is when a guest enumerates BHI_DIS_S, which +indicates that the guest is running on an unaffected host. + +To prevent guests from unnecessarily deploying the mitigation on unaffected +platforms, Intel has defined ITS_NO bit(62) in MSR IA32_ARCH_CAPABILITIES. When +a guest sees this bit set, it should not enumerate the ITS bug. Note, this bit +is not set by any hardware, but is **intended for VMMs to synthesize** it for +guests as per the host's affected status. + +Mitigation options +^^^^^^^^^^^^^^^^^^ +The ITS mitigation can be controlled using the "indirect_target_selection" +kernel parameter. The available options are: + + ======== =================================================================== + on (default) Deploy the "Aligned branch/return thunks" mitigation. + If spectre_v2 mitigation enables retpoline, aligned-thunks are only + deployed for the affected RET instructions. Retpoline mitigates + indirect branches. + + off Disable ITS mitigation. + + vmexit Equivalent to "=on" if the CPU is affected by guest/host isolation + part of ITS. Otherwise, mitigation is not deployed. This option is + useful when host userspace is not in the threat model, and only + attacks from guest to host are considered. + + force Force the ITS bug and deploy the default mitigation. + ======== =================================================================== + +Sysfs reporting +--------------- + +The sysfs file showing ITS mitigation status is: + + /sys/devices/system/cpu/vulnerabilities/indirect_target_selection + +Note, microcode mitigation status is not reported in this file. + +The possible values in this file are: + +.. list-table:: + + * - Not affected + - The processor is not vulnerable. + * - Vulnerable + - System is vulnerable and no mitigation has been applied. + * - Vulnerable, KVM: Not affected + - System is vulnerable to intra-mode BTI, but not affected by eIBRS + guest/host isolation. 
+ * - Mitigation: Aligned branch/return thunks + - The mitigation is enabled, affected indirect branches and RETs are + relocated to safe thunks. + +References +---------- +.. [#f1] Microcode repository - https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files + +.. [#f2] Affected Processors list - https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html + +.. [#f3] Affected Processors list (machine readable) - https://github.com/intel/Intel-affected-processor-list diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 18c8fc60db93..15e40774e9bc 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2025,6 +2025,20 @@ different crypto accelerators. This option can be used to achieve best performance for particular HW. + indirect_target_selection= [X86,Intel] Mitigation control for Indirect + Target Selection(ITS) bug in Intel CPUs. Updated + microcode is also required for a fix in IBPB. + + on: Enable mitigation (default). + off: Disable mitigation. + force: Force the ITS bug and deploy default + mitigation. + vmexit: Only deploy mitigation if CPU is affected by + guest/host isolation part of ITS. + + For details see: + Documentation/admin-guide/hw-vuln/indirect-target-selection.rst + init= [KNL] Format: Run specified binary instead of /sbin/init as init @@ -3263,6 +3277,7 @@ expose users to several CPU vulnerabilities. Equivalent to: if nokaslr then kpti=0 [ARM64] gather_data_sampling=off [X86] + indirect_target_selection=off [X86] kvm.nx_huge_pages=off [X86] l1tf=off [X86] mds=off [X86] @@ -5765,6 +5780,8 @@ Selecting 'on' will also enable the mitigation against user space to user space task attacks. + Selecting specific mitigation does not force enable + user mitigations. Selecting 'off' will disable both the kernel and the user space protections. diff --git a/Documentation/core-api/pin_user_pages.rst b/Documentation/core-api/pin_user_pages.rst index b18416f4500f..7995ce2b9676 100644 --- a/Documentation/core-api/pin_user_pages.rst +++ b/Documentation/core-api/pin_user_pages.rst @@ -113,6 +113,12 @@ pages: This also leads to limitations: there are only 31-10==21 bits available for a counter that increments 10 bits at a time. +* Because of that limitation, special handling is applied to the zero pages + when using FOLL_PIN. We only pretend to pin a zero page - we don't alter its + refcount or pincount at all (it is permanent, so there's no need). The + unpinning functions also don't do anything to a zero page. This is + transparent to the caller. + * Callers must specifically request "dma-pinned tracking of pages". In other words, just calling get_user_pages() will not suffice; a new set of functions, pin_user_page() and related, must be used. diff --git a/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml b/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml index 983033fe5b17..592285f616f5 100644 --- a/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml +++ b/Documentation/devicetree/bindings/clock/adi,axi-clkgen.yaml @@ -26,9 +26,21 @@ properties: description: Specifies the reference clock(s) from which the output frequency is derived. This must either reference one clock if only the first clock - input is connected or two if both clock inputs are connected. 
- minItems: 1 - maxItems: 2 + input is connected or two if both clock inputs are connected. The last + clock is the AXI bus clock that needs to be enabled so we can access the + core registers. + minItems: 2 + maxItems: 3 + + clock-names: + oneOf: + - items: + - const: clkin1 + - const: s_axi_aclk + - items: + - const: clkin1 + - const: clkin2 + - const: s_axi_aclk '#clock-cells': const: 0 @@ -40,6 +52,7 @@ required: - compatible - reg - clocks + - clock-names - '#clock-cells' additionalProperties: false @@ -50,5 +63,6 @@ examples: compatible = "adi,axi-clkgen-2.00.a"; #clock-cells = <0>; reg = <0xff000000 0x1000>; - clocks = <&osc 1>; + clocks = <&osc 1>, <&clkc 15>; + clock-names = "clkin1", "s_axi_aclk"; }; diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml index 987aa83c2649..e956f524e379 100644 --- a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml +++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml @@ -87,7 +87,7 @@ properties: adi,dsi-lanes: description: Number of DSI data lanes connected to the DSI host. $ref: /schemas/types.yaml#/definitions/uint32 - enum: [ 1, 2, 3, 4 ] + enum: [ 2, 3, 4 ] ports: description: diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml index fee0f023a8c8..b0a0ecef1154 100644 --- a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml +++ b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml @@ -26,7 +26,7 @@ properties: maxItems: 1 spi-max-frequency: - maximum: 30000000 + maximum: 66000000 reset-gpios: maxItems: 1 diff --git a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml index 31840e33dcf5..3452cc9ef337 100644 --- a/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml +++ b/Documentation/devicetree/bindings/leds/leds-class-multicolor.yaml @@ -27,7 +27,7 @@ properties: description: | For multicolor LED support this property should be defined as either LED_COLOR_ID_RGB or LED_COLOR_ID_MULTI which can be found in - include/linux/leds/common.h. + include/dt-bindings/leds/common.h. enum: [ 8, 9 ] required: diff --git a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml index fbface720678..d57999249974 100644 --- a/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml +++ b/Documentation/devicetree/bindings/mfd/rohm,bd71815-pmic.yaml @@ -50,15 +50,15 @@ properties: minimum: 0 maximum: 1 - rohm,charger-sense-resistor-ohms: - minimum: 10000000 - maximum: 50000000 + rohm,charger-sense-resistor-micro-ohms: + minimum: 10000 + maximum: 50000 description: | - BD71827 and BD71828 have SAR ADC for measuring charging currents. - External sense resistor (RSENSE in data sheet) should be used. If - something other but 30MOhm resistor is used the resistance value - should be given here in Ohms. - default: 30000000 + BD71815 has SAR ADC for measuring charging currents. External sense + resistor (RSENSE in data sheet) should be used. If something other + but a 30 mOhm resistor is used the resistance value should be given + here in micro Ohms. + default: 30000 regulators: $ref: ../regulator/rohm,bd71815-regulator.yaml @@ -67,7 +67,7 @@ properties: gpio-reserved-ranges: description: | - Usage of BD71828 GPIO pins can be changed via OTP. 
This property can be + Usage of BD71815 GPIO pins can be changed via OTP. This property can be used to mark the pins which should not be configured for GPIO. Please see the ../gpio/gpio.txt for more information. @@ -113,7 +113,7 @@ examples: gpio-controller; #gpio-cells = <2>; - rohm,charger-sense-resistor-ohms = <10000000>; + rohm,charger-sense-resistor-micro-ohms = <10000>; regulators { buck1: buck1 { diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml index 802e3ca8be4d..f6bd7d19f461 100644 --- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml +++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml @@ -25,7 +25,7 @@ properties: "#address-cells": const: 1 description: | - The cell is the slot ID if a function subnode is used. + The cell is the SDIO function number if a function subnode is used. "#size-cells": const: 0 diff --git a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml index 364b58730be2..796c09f24f3e 100644 --- a/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml +++ b/Documentation/devicetree/bindings/regulator/mt6315-regulator.yaml @@ -31,10 +31,6 @@ properties: $ref: "regulator.yaml#" unevaluatedProperties: false - properties: - regulator-compatible: - pattern: "^vbuck[1-4]$" - additionalProperties: false required: @@ -52,7 +48,6 @@ examples: regulators { vbuck1 { - regulator-compatible = "vbuck1"; regulator-min-microvolt = <300000>; regulator-max-microvolt = <1193750>; regulator-enable-ramp-delay = <256>; @@ -60,7 +55,6 @@ examples: }; vbuck3 { - regulator-compatible = "vbuck3"; regulator-min-microvolt = <300000>; regulator-max-microvolt = <1193750>; regulator-enable-ramp-delay = <256>; diff --git a/Documentation/devicetree/bindings/serial/rs485.yaml b/Documentation/devicetree/bindings/serial/rs485.yaml index 90a1bab40f05..428da6faef3a 100644 --- a/Documentation/devicetree/bindings/serial/rs485.yaml +++ b/Documentation/devicetree/bindings/serial/rs485.yaml @@ -18,16 +18,15 @@ properties: description: prop-encoded-array $ref: /schemas/types.yaml#/definitions/uint32-array items: - items: - - description: Delay between rts signal and beginning of data sent in - milliseconds. It corresponds to the delay before sending data. - default: 0 - maximum: 100 - - description: Delay between end of data sent and rts signal in milliseconds. - It corresponds to the delay after sending data and actual release - of the line. - default: 0 - maximum: 100 + - description: Delay between rts signal and beginning of data sent in + milliseconds. It corresponds to the delay before sending data. + default: 0 + maximum: 100 + - description: Delay between end of data sent and rts signal in milliseconds. + It corresponds to the delay after sending data and actual release + of the line. + default: 0 + maximum: 100 rs485-rts-active-low: description: drive RTS low when sending (default is high). diff --git a/Documentation/devicetree/bindings/sound/mt6359.yaml b/Documentation/devicetree/bindings/sound/mt6359.yaml index 23d411fc4200..128698630c86 100644 --- a/Documentation/devicetree/bindings/sound/mt6359.yaml +++ b/Documentation/devicetree/bindings/sound/mt6359.yaml @@ -23,8 +23,8 @@ properties: Indicates how many data pins are used to transmit two channels of PDM signal. 0 means two wires, 1 means one wire. Default value is 0. 
enum: - - 0 # one wire - - 1 # two wires + - 0 # two wires + - 1 # one wire mediatek,mic-type-0: $ref: /schemas/types.yaml#/definitions/uint32 @@ -53,9 +53,9 @@ additionalProperties: false examples: - | - mt6359codec: mt6359codec { - mediatek,dmic-mode = <0>; - mediatek,mic-type-0 = <2>; + mt6359codec: audio-codec { + mediatek,dmic-mode = <0>; + mediatek,mic-type-0 = <2>; }; ... diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 6e323a380294..77e9413cdee0 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -869,6 +869,8 @@ patternProperties: description: National Semiconductor "^nec,.*": description: NEC LCD Technologies, Ltd. + "^neofidelity,.*": + description: Neofidelity Inc. "^neonode,.*": description: Neonode Inc. "^netgear,.*": diff --git a/Documentation/driver-api/fpga/fpga-bridge.rst b/Documentation/driver-api/fpga/fpga-bridge.rst index 604208534095..833f68fb0700 100644 --- a/Documentation/driver-api/fpga/fpga-bridge.rst +++ b/Documentation/driver-api/fpga/fpga-bridge.rst @@ -6,9 +6,12 @@ API to implement a new FPGA bridge * struct fpga_bridge - The FPGA Bridge structure * struct fpga_bridge_ops - Low level Bridge driver ops -* fpga_bridge_register() - Create and register a bridge +* __fpga_bridge_register() - Create and register a bridge * fpga_bridge_unregister() - Unregister a bridge +The helper macro ``fpga_bridge_register()`` automatically sets +the module that registers the FPGA bridge as the owner. + .. kernel-doc:: include/linux/fpga/fpga-bridge.h :functions: fpga_bridge @@ -16,7 +19,7 @@ API to implement a new FPGA bridge :functions: fpga_bridge_ops .. kernel-doc:: drivers/fpga/fpga-bridge.c - :functions: fpga_bridge_register + :functions: __fpga_bridge_register .. kernel-doc:: drivers/fpga/fpga-bridge.c :functions: fpga_bridge_unregister diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst index 49c0a9512653..8d2b79f696c1 100644 --- a/Documentation/driver-api/fpga/fpga-mgr.rst +++ b/Documentation/driver-api/fpga/fpga-mgr.rst @@ -24,7 +24,8 @@ How to support a new FPGA device -------------------------------- To add another FPGA manager, write a driver that implements a set of ops. The -probe function calls fpga_mgr_register() or fpga_mgr_register_full(), such as:: +probe function calls ``fpga_mgr_register()`` or ``fpga_mgr_register_full()``, +such as:: static const struct fpga_manager_ops socfpga_fpga_ops = { .write_init = socfpga_fpga_ops_configure_init, @@ -69,10 +70,11 @@ probe function calls fpga_mgr_register() or fpga_mgr_register_full(), such as:: } Alternatively, the probe function could call one of the resource managed -register functions, devm_fpga_mgr_register() or devm_fpga_mgr_register_full(). -When these functions are used, the parameter syntax is the same, but the call -to fpga_mgr_unregister() should be removed. In the above example, the -socfpga_fpga_remove() function would not be required. +register functions, ``devm_fpga_mgr_register()`` or +``devm_fpga_mgr_register_full()``. When these functions are used, the +parameter syntax is the same, but the call to ``fpga_mgr_unregister()`` should be +removed. In the above example, the ``socfpga_fpga_remove()`` function would not be +required. The ops will implement whatever device specific register writes are needed to do the programming sequence for this particular FPGA. 
These ops return 0 for @@ -125,15 +127,19 @@ API for implementing a new FPGA Manager driver * struct fpga_manager - the FPGA manager struct * struct fpga_manager_ops - Low level FPGA manager driver ops * struct fpga_manager_info - Parameter structure for fpga_mgr_register_full() -* fpga_mgr_register_full() - Create and register an FPGA manager using the +* __fpga_mgr_register_full() - Create and register an FPGA manager using the fpga_mgr_info structure to provide the full flexibility of options -* fpga_mgr_register() - Create and register an FPGA manager using standard +* __fpga_mgr_register() - Create and register an FPGA manager using standard arguments -* devm_fpga_mgr_register_full() - Resource managed version of - fpga_mgr_register_full() -* devm_fpga_mgr_register() - Resource managed version of fpga_mgr_register() +* __devm_fpga_mgr_register_full() - Resource managed version of + __fpga_mgr_register_full() +* __devm_fpga_mgr_register() - Resource managed version of __fpga_mgr_register() * fpga_mgr_unregister() - Unregister an FPGA manager +Helper macros ``fpga_mgr_register_full()``, ``fpga_mgr_register()``, +``devm_fpga_mgr_register_full()``, and ``devm_fpga_mgr_register()`` are available +to ease the registration. + .. kernel-doc:: include/linux/fpga/fpga-mgr.h :functions: fpga_mgr_states @@ -147,16 +153,16 @@ API for implementing a new FPGA Manager driver :functions: fpga_manager_info .. kernel-doc:: drivers/fpga/fpga-mgr.c - :functions: fpga_mgr_register_full + :functions: __fpga_mgr_register_full .. kernel-doc:: drivers/fpga/fpga-mgr.c - :functions: fpga_mgr_register + :functions: __fpga_mgr_register .. kernel-doc:: drivers/fpga/fpga-mgr.c - :functions: devm_fpga_mgr_register_full + :functions: __devm_fpga_mgr_register_full .. kernel-doc:: drivers/fpga/fpga-mgr.c - :functions: devm_fpga_mgr_register + :functions: __devm_fpga_mgr_register .. kernel-doc:: drivers/fpga/fpga-mgr.c :functions: fpga_mgr_unregister diff --git a/Documentation/driver-api/serial/driver.rst b/Documentation/driver-api/serial/driver.rst index 23c6b956cd90..9436f7c11306 100644 --- a/Documentation/driver-api/serial/driver.rst +++ b/Documentation/driver-api/serial/driver.rst @@ -100,4 +100,4 @@ Some helpers are provided in order to set/get modem control lines via GPIO. .. kernel-doc:: drivers/tty/serial/serial_mctrl_gpio.c :identifiers: mctrl_gpio_init mctrl_gpio_free mctrl_gpio_to_gpiod mctrl_gpio_set mctrl_gpio_get mctrl_gpio_enable_ms - mctrl_gpio_disable_ms + mctrl_gpio_disable_ms_sync mctrl_gpio_disable_ms_no_sync diff --git a/Documentation/filesystems/mount_api.rst b/Documentation/filesystems/mount_api.rst index 1d16787a00e9..253078b99799 100644 --- a/Documentation/filesystems/mount_api.rst +++ b/Documentation/filesystems/mount_api.rst @@ -778,7 +778,8 @@ process the parameters it is given. * :: - bool fs_validate_description(const struct fs_parameter_description *desc); + bool fs_validate_description(const char *name, + const struct fs_parameter_description *desc); This performs some validation checks on a parameter description. It returns true if the description is good and false if it is not. 
It will diff --git a/Documentation/hwmon/dell-smm-hwmon.rst b/Documentation/hwmon/dell-smm-hwmon.rst index d8f1d6859b96..1c12fbba440b 100644 --- a/Documentation/hwmon/dell-smm-hwmon.rst +++ b/Documentation/hwmon/dell-smm-hwmon.rst @@ -32,12 +32,12 @@ Temperature sensors and fans can be queried and set via the standard =============================== ======= ======================================= Name Perm Description =============================== ======= ======================================= -fan[1-3]_input RO Fan speed in RPM. -fan[1-3]_label RO Fan label. -fan[1-3]_min RO Minimal Fan speed in RPM -fan[1-3]_max RO Maximal Fan speed in RPM -fan[1-3]_target RO Expected Fan speed in RPM -pwm[1-3] RW Control the fan PWM duty-cycle. +fan[1-4]_input RO Fan speed in RPM. +fan[1-4]_label RO Fan label. +fan[1-4]_min RO Minimal Fan speed in RPM +fan[1-4]_max RO Maximal Fan speed in RPM +fan[1-4]_target RO Expected Fan speed in RPM +pwm[1-4] RW Control the fan PWM duty-cycle. pwm1_enable WO Enable or disable automatic BIOS fan control (not supported on all laptops, see below for details). @@ -93,7 +93,7 @@ Again, when you find new codes, we'd be happy to have your patches! --------------------------- The driver also exports the fans as thermal cooling devices with -``type`` set to ``dell-smm-fan[1-3]``. This allows for easy fan control +``type`` set to ``dell-smm-fan[1-4]``. This allows for easy fan control using one of the thermal governors. Module parameters diff --git a/Documentation/kbuild/kconfig.rst b/Documentation/kbuild/kconfig.rst index 5967c79c3baa..eee0d298774a 100644 --- a/Documentation/kbuild/kconfig.rst +++ b/Documentation/kbuild/kconfig.rst @@ -54,6 +54,15 @@ KCONFIG_OVERWRITECONFIG If you set KCONFIG_OVERWRITECONFIG in the environment, Kconfig will not break symlinks when .config is a symlink to somewhere else. +KCONFIG_WARN_UNKNOWN_SYMBOLS +---------------------------- +This environment variable makes Kconfig warn about all unrecognized +symbols in the config input. + +KCONFIG_WERROR +-------------- +If set, Kconfig treats warnings as errors. + `CONFIG_` --------- If you set `CONFIG_` in the environment, Kconfig will prefix all symbols diff --git a/Documentation/locking/seqlock.rst b/Documentation/locking/seqlock.rst index bfda1a5fecad..ec6411d02ac8 100644 --- a/Documentation/locking/seqlock.rst +++ b/Documentation/locking/seqlock.rst @@ -153,7 +153,7 @@ Use seqcount_latch_t when the write side sections cannot be protected from interruption by readers. This is typically the case when the read side can be invoked from NMI handlers. -Check `raw_write_seqcount_latch()` for more information. +Check `write_seqcount_latch()` for more information. .. _seqlock_t: diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst index b705d2801e9c..80b1c5e19fd5 100644 --- a/Documentation/networking/j1939.rst +++ b/Documentation/networking/j1939.rst @@ -121,7 +121,7 @@ format, the Group Extension is set in the PS-field. On the other hand, when using PDU1 format, the PS-field contains a so-called Destination Address, which is _not_ part of the PGN. When communicating a PGN -from user space to kernel (or vice versa) and PDU2 format is used, the PS-field +from user space to kernel (or vice versa) and PDU1 format is used, the PS-field of the PGN shall be set to zero. The Destination Address shall be set elsewhere. 
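To make the PDU1 addressing rule above concrete, a minimal user-space sketch using the SocketCAN J1939 API follows; the interface name "can0", the node addresses 0x20/0x30, and the PGN 0x12300 are illustrative assumptions rather than values taken from this patch::

    #include <string.h>
    #include <unistd.h>
    #include <net/if.h>
    #include <sys/socket.h>
    #include <linux/can.h>
    #include <linux/can/j1939.h>

    int main(void)
    {
        int sock = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
        if (sock < 0)
            return 1;

        /* Bind our own (example) source address 0x20 on can0. */
        struct sockaddr_can self = {
            .can_family = AF_CAN,
            .can_ifindex = if_nametoindex("can0"),
            .can_addr.j1939 = {
                .name = J1939_NO_NAME,
                .pgn = J1939_NO_PGN,
                .addr = 0x20,
            },
        };
        if (bind(sock, (struct sockaddr *)&self, sizeof(self)) < 0)
            return 1;

        /*
         * PDU1 PGN: the PF byte is 0x23 (< 240), so the PS byte of the
         * PGN handed to the kernel is zero.  The destination address
         * (0x30 here) goes in .addr, not in the PGN itself.
         */
        struct sockaddr_can peer = {
            .can_family = AF_CAN,
            .can_ifindex = if_nametoindex("can0"),
            .can_addr.j1939 = {
                .name = J1939_NO_NAME,
                .pgn = 0x12300,
                .addr = 0x30,
            },
        };
        const char data[] = "example payload";
        sendto(sock, data, sizeof(data), 0,
               (struct sockaddr *)&peer, sizeof(peer));
        close(sock);
        return 0;
    }
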
diff --git a/Documentation/networking/strparser.rst b/Documentation/networking/strparser.rst index 6cab1f74ae05..7f623d1db72a 100644 --- a/Documentation/networking/strparser.rst +++ b/Documentation/networking/strparser.rst @@ -112,7 +112,7 @@ Functions Callbacks ========= -There are six callbacks: +There are seven callbacks: :: @@ -182,6 +182,13 @@ There are six callbacks: the length of the message. skb->len - offset may be greater then full_len since strparser does not trim the skb. + :: + + int (*read_sock)(struct strparser *strp, read_descriptor_t *desc, + sk_read_actor_t recv_actor); + + The read_sock callback is used by strparser instead of + sock->ops->read_sock, if provided. :: int (*read_sock_done)(struct strparser *strp, int err); diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst index 65b86e487afe..b6d5a3a8febc 100644 --- a/Documentation/power/runtime_pm.rst +++ b/Documentation/power/runtime_pm.rst @@ -347,7 +347,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: `int pm_runtime_resume_and_get(struct device *dev);` - run pm_runtime_resume(dev) and if successful, increment the device's - usage counter; return the result of pm_runtime_resume + usage counter; returns 0 on success (whether or not the device's + runtime PM status was already 'active') or the error code from + pm_runtime_resume() on failure. `int pm_request_idle(struct device *dev);` - submit a request to execute the subsystem-level idle callback for the diff --git a/Documentation/timers/no_hz.rst b/Documentation/timers/no_hz.rst index f8786be15183..7fe8ef9718d8 100644 --- a/Documentation/timers/no_hz.rst +++ b/Documentation/timers/no_hz.rst @@ -129,11 +129,8 @@ adaptive-tick CPUs: At least one non-adaptive-tick CPU must remain online to handle timekeeping tasks in order to ensure that system calls like gettimeofday() returns accurate values on adaptive-tick CPUs. (This is not an issue for CONFIG_NO_HZ_IDLE=y because there are no running -user processes to observe slight drifts in clock rate.) Therefore, the -boot CPU is prohibited from entering adaptive-ticks mode. Specifying a -"nohz_full=" mask that includes the boot CPU will result in a boot-time -error message, and the boot CPU will be removed from the mask. Note that -this means that your system must have at least two CPUs in order for +user processes to observe slight drifts in clock rate.) Note that this +means that your system must have at least two CPUs in order for CONFIG_NO_HZ_FULL=y to do anything for you. Finally, adaptive-ticks CPUs must have their RCU callbacks offloaded. diff --git a/MAINTAINERS b/MAINTAINERS index 4b19dfb5d2fd..428b2259225d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4833,6 +4833,7 @@ S: Maintained F: Documentation/admin-guide/module-signing.rst F: certs/ F: scripts/sign-file.c +F: scripts/ssl-common.h F: tools/certs/ CFAG12864B LCD DRIVER diff --git a/Makefile b/Makefile index ca304cece572..70969a997a7f 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 1 -SUBLEVEL = 118 +SUBLEVEL = 141 EXTRAVERSION = NAME = Curry Ramen @@ -528,7 +528,7 @@ KGZIP = gzip KBZIP2 = bzip2 KLZOP = lzop LZMA = lzma -LZ4 = lz4c +LZ4 = lz4 XZ = xz ZSTD = zstd @@ -875,6 +875,18 @@ ifdef CONFIG_CC_IS_CLANG KBUILD_CPPFLAGS += -Qunused-arguments # The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable. 
KBUILD_CFLAGS += -Wno-gnu + +# Clang may emit a warning when a const variable, such as the dummy variables +# in typecheck(), or const member of an aggregate type are not initialized, +# which can result in unexpected behavior. However, in many audited cases of +# the "field" variant of the warning, this is intentional because the field is +# never used within a particular call path, the field is within a union with +# other non-const members, or the containing object is not const so the field +# can be modified via memcpy() / memset(). While the variable warning also gets +# disabled with this same switch, there should not be too much coverage lost +# because -Wuninitialized will still flag when an uninitialized const variable +# is used. +KBUILD_CFLAGS += $(call cc-disable-warning, default-const-init-unsafe) else # gcc inanely warns about local variables called 'main' @@ -1075,6 +1087,9 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) # Require designated initializers for all marked structures KBUILD_CFLAGS += $(call cc-option,-Werror=designated-init) +# Ensure compilers do not transform certain loops into calls to wcslen() +KBUILD_CFLAGS += -fno-builtin-wcslen + # change __FILE__ to the relative path from the srctree KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=) @@ -1127,6 +1142,11 @@ endif KBUILD_USERCFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS)) KBUILD_USERLDFLAGS += $(filter -m32 -m64 --target=%, $(KBUILD_CFLAGS)) +# userspace programs are linked via the compiler, use the correct linker +ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_LD_IS_LLD),yy) +KBUILD_USERLDFLAGS += $(call cc-option, --ld-path=$(LD)) +endif + # make the checker run with the right architecture CHECKFLAGS += --arch=$(ARCH) @@ -1846,11 +1866,6 @@ rustfmt: rustfmtcheck: rustfmt_flags = --check rustfmtcheck: rustfmt -# IDE support targets -PHONY += rust-analyzer -rust-analyzer: - $(Q)$(MAKE) $(build)=rust $@ - # Misc # --------------------------------------------------------------------------- @@ -1903,6 +1918,7 @@ help: @echo ' modules - default target, build the module(s)' @echo ' modules_install - install the module' @echo ' clean - remove generated files in module directory only' + @echo ' rust-analyzer - generate rust-project.json rust-analyzer support file' @echo '' endif # KBUILD_EXTMOD @@ -2039,6 +2055,11 @@ quiet_cmd_tags = GEN $@ tags TAGS cscope gtags: FORCE $(call cmd,tags) +# IDE support targets +PHONY += rust-analyzer +rust-analyzer: + $(Q)$(MAKE) $(build)=rust $@ + # Script to generate missing namespace dependencies # --------------------------------------------------------------------------- diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h index 8049997fa372..2039a8c8d547 100644 --- a/arch/alpha/include/asm/elf.h +++ b/arch/alpha/include/asm/elf.h @@ -74,7 +74,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; /* * This is used to ensure we don't load something for the wrong architecture. */ -#define elf_check_arch(x) ((x)->e_machine == EM_ALPHA) +#define elf_check_arch(x) (((x)->e_machine == EM_ALPHA) && !((x)->e_flags & EF_ALPHA_32BIT)) /* * These are used to set parameters in the core dumps. @@ -145,10 +145,6 @@ extern int dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task); : amask (AMASK_CIX) ? "ev6" : "ev67"); \ }) -#define SET_PERSONALITY(EX) \ - set_personality(((EX).e_flags & EF_ALPHA_32BIT) \ - ? 
PER_LINUX_32BIT : PER_LINUX) - extern int alpha_l1i_cacheshape; extern int alpha_l1d_cacheshape; extern int alpha_l2_cacheshape; diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h index 9e45f6735d5d..6fe4e9deeb5e 100644 --- a/arch/alpha/include/asm/pgtable.h +++ b/arch/alpha/include/asm/pgtable.h @@ -322,7 +322,7 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) extern void paging_init(void); -/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. */ +/* We have our own get_unmapped_area */ #define HAVE_ARCH_UNMAPPED_AREA #endif /* _ALPHA_PGTABLE_H */ diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h index 714abe494e5f..916a2dd782c1 100644 --- a/arch/alpha/include/asm/processor.h +++ b/arch/alpha/include/asm/processor.h @@ -8,23 +8,19 @@ #ifndef __ASM_ALPHA_PROCESSOR_H #define __ASM_ALPHA_PROCESSOR_H -#include /* for ADDR_LIMIT_32BIT */ - /* * We have a 42-bit user address space: 4TB user VM... */ #define TASK_SIZE (0x40000000000UL) -#define STACK_TOP \ - (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) +#define STACK_TOP (0x00120000000UL) #define STACK_TOP_MAX 0x00120000000UL /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ -#define TASK_UNMAPPED_BASE \ - ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : TASK_SIZE / 2) +#define TASK_UNMAPPED_BASE (TASK_SIZE / 2) /* This is dead. Everything has been moved to thread_info. */ struct thread_struct { }; diff --git a/arch/alpha/include/uapi/asm/ptrace.h b/arch/alpha/include/uapi/asm/ptrace.h index c29194181025..22170f7b8be8 100644 --- a/arch/alpha/include/uapi/asm/ptrace.h +++ b/arch/alpha/include/uapi/asm/ptrace.h @@ -42,6 +42,8 @@ struct pt_regs { unsigned long trap_a0; unsigned long trap_a1; unsigned long trap_a2; +/* This makes the stack 16-byte aligned as GCC expects */ + unsigned long __pad0; /* These are saved by PAL-code: */ unsigned long ps; unsigned long pc; diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c index 2e125e5c1508..05d9296af5ea 100644 --- a/arch/alpha/kernel/asm-offsets.c +++ b/arch/alpha/kernel/asm-offsets.c @@ -32,7 +32,9 @@ void foo(void) DEFINE(CRED_EGID, offsetof(struct cred, egid)); BLANK(); + DEFINE(SP_OFF, offsetof(struct pt_regs, ps)); DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs)); + DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack)); DEFINE(PT_PTRACED, PT_PTRACED); DEFINE(CLONE_VM, CLONE_VM); DEFINE(CLONE_UNTRACED, CLONE_UNTRACED); diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S index c41a5a9c3b9f..ba99cc9d27c7 100644 --- a/arch/alpha/kernel/entry.S +++ b/arch/alpha/kernel/entry.S @@ -15,10 +15,6 @@ .set noat .cfi_sections .debug_frame -/* Stack offsets. */ -#define SP_OFF 184 -#define SWITCH_STACK_SIZE 320 - .macro CFI_START_OSF_FRAME func .align 4 .globl \func @@ -199,8 +195,8 @@ CFI_END_OSF_FRAME entArith CFI_START_OSF_FRAME entMM SAVE_ALL /* save $9 - $15 so the inline exception code can manipulate them. 
*/ - subq $sp, 56, $sp - .cfi_adjust_cfa_offset 56 + subq $sp, 64, $sp + .cfi_adjust_cfa_offset 64 stq $9, 0($sp) stq $10, 8($sp) stq $11, 16($sp) @@ -215,7 +211,7 @@ CFI_START_OSF_FRAME entMM .cfi_rel_offset $13, 32 .cfi_rel_offset $14, 40 .cfi_rel_offset $15, 48 - addq $sp, 56, $19 + addq $sp, 64, $19 /* handle the fault */ lda $8, 0x3fff bic $sp, $8, $8 @@ -228,7 +224,7 @@ CFI_START_OSF_FRAME entMM ldq $13, 32($sp) ldq $14, 40($sp) ldq $15, 48($sp) - addq $sp, 56, $sp + addq $sp, 64, $sp .cfi_restore $9 .cfi_restore $10 .cfi_restore $11 @@ -236,7 +232,7 @@ CFI_START_OSF_FRAME entMM .cfi_restore $13 .cfi_restore $14 .cfi_restore $15 - .cfi_adjust_cfa_offset -56 + .cfi_adjust_cfa_offset -64 /* finish up the syscall as normal. */ br ret_from_sys_call CFI_END_OSF_FRAME entMM @@ -383,8 +379,8 @@ entUnaUser: .cfi_restore $0 .cfi_adjust_cfa_offset -256 SAVE_ALL /* setup normal kernel stack */ - lda $sp, -56($sp) - .cfi_adjust_cfa_offset 56 + lda $sp, -64($sp) + .cfi_adjust_cfa_offset 64 stq $9, 0($sp) stq $10, 8($sp) stq $11, 16($sp) @@ -400,7 +396,7 @@ entUnaUser: .cfi_rel_offset $14, 40 .cfi_rel_offset $15, 48 lda $8, 0x3fff - addq $sp, 56, $19 + addq $sp, 64, $19 bic $sp, $8, $8 jsr $26, do_entUnaUser ldq $9, 0($sp) @@ -410,7 +406,7 @@ entUnaUser: ldq $13, 32($sp) ldq $14, 40($sp) ldq $15, 48($sp) - lda $sp, 56($sp) + lda $sp, 64($sp) .cfi_restore $9 .cfi_restore $10 .cfi_restore $11 @@ -418,7 +414,7 @@ entUnaUser: .cfi_restore $13 .cfi_restore $14 .cfi_restore $15 - .cfi_adjust_cfa_offset -56 + .cfi_adjust_cfa_offset -64 br ret_from_sys_call CFI_END_OSF_FRAME entUna diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index c54469b369cb..03b70b6c9250 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -1213,8 +1213,7 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p) return ret; } -/* Get an address range which is currently unmapped. Similar to the - generic version except that we know how to honor ADDR_LIMIT_32BIT. */ +/* Get an address range which is currently unmapped. */ static unsigned long arch_get_unmapped_area_1(unsigned long addr, unsigned long len, @@ -1236,13 +1235,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { - unsigned long limit; - - /* "32 bit" actually means 31 bit, since pointers sign extend. */ - if (current->personality & ADDR_LIMIT_32BIT) - limit = 0x80000000; - else - limit = TASK_SIZE; + unsigned long limit = TASK_SIZE; if (len > limit) return -ENOMEM; diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index d9a67b370e04..de72bd837c5a 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -707,7 +707,7 @@ s_reg_to_mem (unsigned long s_reg) static int unauser_reg_offsets[32] = { R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8), /* r9 ... r15 are stored in front of regs. */ - -56, -48, -40, -32, -24, -16, -8, + -64, -56, -48, -40, -32, -24, -16, /* padding at -8 */ R(r16), R(r17), R(r18), R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26), R(r27), R(r28), R(gp), diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 2b49aa94e4de..1179b7df4af9 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -78,8 +78,8 @@ __load_new_mm_context(struct mm_struct *next_mm) /* Macro for exception fixup code to access integer registers. */ #define dpf_reg(r) \ - (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ - (r) <= 18 ? 
(r)+10 : (r)-10]) + (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-17 : \ + (r) <= 18 ? (r)+11 : (r)-10]) asmlinkage void do_page_fault(unsigned long address, unsigned long mmcsr, diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 329400a1c355..8e6f52c6626d 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile @@ -6,7 +6,7 @@ KBUILD_DEFCONFIG := haps_hs_smp_defconfig ifeq ($(CROSS_COMPILE),) -CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-) +CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux- arc-linux-gnu-) endif cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__ diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi index 941c4d16791b..27b467219a40 100644 --- a/arch/arm/boot/dts/bcm2711.dtsi +++ b/arch/arm/boot/dts/bcm2711.dtsi @@ -134,7 +134,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; @@ -145,7 +145,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; @@ -156,7 +156,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; @@ -167,7 +167,7 @@ clocks = <&clocks BCM2835_CLOCK_UART>, <&clocks BCM2835_CLOCK_VPU>; clock-names = "uartclk", "apb_pclk"; - arm,primecell-periphid = <0x00241011>; + arm,primecell-periphid = <0x00341011>; status = "disabled"; }; @@ -451,8 +451,6 @@ IRQ_TYPE_LEVEL_LOW)>, ; - /* This only applies to the ARMv7 stub */ - arm,cpu-registers-not-fw-configured; }; cpus: cpus { @@ -1154,6 +1152,7 @@ }; &uart0 { + arm,primecell-periphid = <0x00341011>; interrupts = ; }; diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi index 5733e3a4ea8e..3fdb79b0e8bf 100644 --- a/arch/arm/boot/dts/dra7-l4.dtsi +++ b/arch/arm/boot/dts/dra7-l4.dtsi @@ -12,6 +12,7 @@ ranges = <0x00000000 0x4a000000 0x100000>, /* segment 0 */ <0x00100000 0x4a100000 0x100000>, /* segment 1 */ <0x00200000 0x4a200000 0x100000>; /* segment 2 */ + dma-ranges; segment@0 { /* 0x4a000000 */ compatible = "simple-pm-bus"; @@ -557,6 +558,7 @@ <0x0007e000 0x0017e000 0x001000>, /* ap 124 */ <0x00059000 0x00159000 0x001000>, /* ap 125 */ <0x0005a000 0x0015a000 0x001000>; /* ap 126 */ + dma-ranges; target-module@2000 { /* 0x4a102000, ap 27 3c.0 */ compatible = "ti,sysc"; diff --git a/arch/arm/boot/dts/imx6qdl-apalis.dtsi b/arch/arm/boot/dts/imx6qdl-apalis.dtsi index 7c17b91f0965..dc170a68c42f 100644 --- a/arch/arm/boot/dts/imx6qdl-apalis.dtsi +++ b/arch/arm/boot/dts/imx6qdl-apalis.dtsi @@ -101,6 +101,11 @@ }; }; + poweroff { + compatible = "regulator-poweroff"; + cpu-supply = <&vgen2_reg>; + }; + reg_module_3v3: regulator-module-3v3 { compatible = "regulator-fixed"; regulator-always-on; @@ -220,10 +225,6 @@ status = "disabled"; }; -&clks { - fsl,pmic-stby-poweroff; -}; - /* Apalis SPI1 */ &ecspi1 { cs-gpios = <&gpio5 25 GPIO_ACTIVE_LOW>; @@ -511,7 +512,6 @@ pmic: pmic@8 { compatible = "fsl,pfuze100"; - fsl,pmic-stby-poweroff; reg = <0x08>; regulators { diff --git a/arch/arm/boot/dts/imx6ul-imx6ull-opos6ul.dtsi b/arch/arm/boot/dts/imx6ul-imx6ull-opos6ul.dtsi index f2386dcb9ff2..dda4fa91b2f2 100644 --- 
a/arch/arm/boot/dts/imx6ul-imx6ull-opos6ul.dtsi +++ b/arch/arm/boot/dts/imx6ul-imx6ull-opos6ul.dtsi @@ -40,6 +40,9 @@ reg = <1>; interrupt-parent = <&gpio4>; interrupts = <16 IRQ_TYPE_LEVEL_LOW>; + micrel,led-mode = <1>; + clocks = <&clks IMX6UL_CLK_ENET_REF>; + clock-names = "rmii-ref"; status = "okay"; }; }; diff --git a/arch/arm/boot/dts/imxrt1050.dtsi b/arch/arm/boot/dts/imxrt1050.dtsi index 03e6a858a7be..a25eae9bd38a 100644 --- a/arch/arm/boot/dts/imxrt1050.dtsi +++ b/arch/arm/boot/dts/imxrt1050.dtsi @@ -87,7 +87,7 @@ reg = <0x402c0000 0x4000>; interrupts = <110>; clocks = <&clks IMXRT1050_CLK_IPG_PDOF>, - <&clks IMXRT1050_CLK_OSC>, + <&clks IMXRT1050_CLK_AHB_PODF>, <&clks IMXRT1050_CLK_USDHC1>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi index 25d31e40a553..74767b670372 100644 --- a/arch/arm/boot/dts/mt7623.dtsi +++ b/arch/arm/boot/dts/mt7623.dtsi @@ -309,7 +309,7 @@ clock-names = "spi", "wrap"; }; - cir: cir@10013000 { + cir: ir-receiver@10013000 { compatible = "mediatek,mt7623-cir"; reg = <0 0x10013000 0 0x1000>; interrupts = ; diff --git a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts index c8ca8cb7f5c9..52ad95a2063a 100644 --- a/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts +++ b/arch/arm/boot/dts/sun9i-a80-cubieboard4.dts @@ -280,8 +280,8 @@ reg_dcdc5: dcdc5 { regulator-always-on; - regulator-min-microvolt = <1425000>; - regulator-max-microvolt = <1575000>; + regulator-min-microvolt = <1450000>; + regulator-max-microvolt = <1550000>; regulator-name = "vcc-dram"; }; diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi index 09996acad639..bb23bb39dd5b 100644 --- a/arch/arm/boot/dts/tegra114.dtsi +++ b/arch/arm/boot/dts/tegra114.dtsi @@ -139,7 +139,7 @@ reg = <0x54400000 0x00040000>; clocks = <&tegra_car TEGRA114_CLK_DSIB>, <&tegra_car TEGRA114_CLK_DSIBLP>, - <&tegra_car TEGRA114_CLK_PLL_D2_OUT0>; + <&tegra_car TEGRA114_CLK_PLL_D_OUT0>; clock-names = "dsi", "lp", "parent"; resets = <&tegra_car 82>; reset-names = "dsi"; diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index fe3a24fe6a00..d5c13897ed99 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -25,6 +25,7 @@ #include #include #include +#include #include "entry-header.S" #include @@ -787,6 +788,13 @@ ENTRY(__switch_to) @ entries covering the vmalloc region. 
@ ldr r2, [ip] +#ifdef CONFIG_KASAN_VMALLOC + @ Also dummy read from the KASAN shadow memory for the new stack if we + @ are using KASAN + mov_l r2, KASAN_SHADOW_OFFSET + add r2, r2, ip, lsr #KASAN_SHADOW_SCALE_SHIFT + ldr r2, [r2] +#endif #endif @ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index bac234bbd8d1..fa1bb8cb791f 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -253,11 +253,15 @@ __create_page_tables: */ add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER) ldr r6, =(_end - 1) + + /* For XIP, kernel_sec_start/kernel_sec_end are currently in RO memory */ +#ifndef CONFIG_XIP_KERNEL adr_l r5, kernel_sec_start @ _pa(kernel_sec_start) #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32 str r8, [r5, #4] @ Save physical start of kernel (BE) #else str r8, [r5] @ Save physical start of kernel (LE) +#endif #endif orr r3, r8, r7 @ Add the MMU flags add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER) @@ -265,6 +269,7 @@ __create_page_tables: add r3, r3, #1 << SECTION_SHIFT cmp r0, r6 bls 1b +#ifndef CONFIG_XIP_KERNEL eor r3, r3, r7 @ Remove the MMU flags adr_l r5, kernel_sec_end @ _pa(kernel_sec_end) #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32 @@ -272,8 +277,7 @@ __create_page_tables: #else str r3, [r5] @ Save physical end of kernel (LE) #endif - -#ifdef CONFIG_XIP_KERNEL +#else /* * Map the kernel image separately as it is not located in RAM. */ @@ -408,7 +412,11 @@ ENTRY(secondary_startup) /* * Use the page tables supplied from __cpu_up. */ +#ifdef CONFIG_XIP_KERNEL + ldr r3, =(secondary_data + PLAT_PHYS_OFFSET - PAGE_OFFSET) +#else adr_l r3, secondary_data +#endif mov_l r12, __secondary_switched ldrd r4, r5, [r3, #0] @ get secondary_data.pgdir ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE: diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c index 059db0b1c1de..9ac01895e716 100644 --- a/arch/arm/kernel/psci_smp.c +++ b/arch/arm/kernel/psci_smp.c @@ -45,8 +45,15 @@ extern void secondary_startup(void); static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle) { if (psci_ops.cpu_on) +#ifdef CONFIG_XIP_KERNEL + return psci_ops.cpu_on(cpu_logical_map(cpu), + ((phys_addr_t)(&secondary_startup) + - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + + CONFIG_XIP_PHYS_ADDR)); +#else return psci_ops.cpu_on(cpu_logical_map(cpu), virt_to_idmap(&secondary_startup)); +#endif return -ENODEV; } diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 437dd0352fd4..77aec670f635 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -537,11 +537,12 @@ extern u32 at91_pm_suspend_in_sram_sz; static int at91_suspend_finish(unsigned long val) { - unsigned char modified_gray_code[] = { - 0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d, - 0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b, - 0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13, - 0x10, 0x11, + /* SYNOPSYS workaround to fix a bug in the calibration logic */ + unsigned char modified_fix_code[] = { + 0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18, + 0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13, + 0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14, + 0x1e, 0x1f, }; unsigned int tmp, index; int i; @@ -552,25 +553,25 @@ static int at91_suspend_finish(unsigned long val) * restore the ZQ0SR0 with the value saved here. 
But the * calibration is buggy and restoring some values from ZQ0SR0 * is forbidden and risky thus we need to provide processed - * values for these (modified gray code values). + * values for these. */ tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0); /* Store pull-down output impedance select. */ index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f; - soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index]; + soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF; /* Store pull-up output impedance select. */ index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f; - soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; + soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF; /* Store pull-down on-die termination impedance select. */ index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f; - soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; + soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF; /* Store pull-up on-die termination impedance select. */ index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f; - soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; + soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF; /* * The 1st 8 words of memory might get corrupted in the process @@ -590,7 +591,21 @@ static int at91_suspend_finish(unsigned long val) return 0; } -static void at91_pm_switch_ba_to_vbat(void) +/** + * at91_pm_switch_ba_to_auto() - Configure Backup Unit Power Switch + * to automatic/hardware mode. + * + * The Backup Unit Power Switch can be managed either by software or hardware. + * Enabling hardware mode allows the automatic transition of power between + * VDDANA (or VDDIN33) and VDDBU (or VBAT, respectively), based on the + * availability of these power sources. + * + * If the Backup Unit Power Switch is already in automatic mode, no action is + * required. If it is in software-controlled mode, it is switched to automatic + * mode to enhance safety and eliminate the need for toggling between power + * sources. + */ +static void at91_pm_switch_ba_to_auto(void) { unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu); unsigned int val; @@ -601,24 +616,19 @@ static void at91_pm_switch_ba_to_vbat(void) val = readl(soc_pm.data.sfrbu + offset); - /* Already on VBAT. */ - if (!(val & soc_pm.sfrbu_regs.pswbu.state)) + /* Already on auto/hardware. */ + if (!(val & soc_pm.sfrbu_regs.pswbu.ctrl)) return; - val &= ~soc_pm.sfrbu_regs.pswbu.softsw; - val |= soc_pm.sfrbu_regs.pswbu.key | soc_pm.sfrbu_regs.pswbu.ctrl; + val &= ~soc_pm.sfrbu_regs.pswbu.ctrl; + val |= soc_pm.sfrbu_regs.pswbu.key; writel(val, soc_pm.data.sfrbu + offset); - - /* Wait for update. 
*/ - val = readl(soc_pm.data.sfrbu + offset); - while (val & soc_pm.sfrbu_regs.pswbu.state) - val = readl(soc_pm.data.sfrbu + offset); } static void at91_pm_suspend(suspend_state_t state) { if (soc_pm.data.mode == AT91_PM_BACKUP) { - at91_pm_switch_ba_to_vbat(); + at91_pm_switch_ba_to_auto(); cpu_suspend(0, at91_suspend_finish); diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig index 7ec7ada287e0..16bf3bed38fe 100644 --- a/arch/arm/mach-omap1/Kconfig +++ b/arch/arm/mach-omap1/Kconfig @@ -9,6 +9,7 @@ menuconfig ARCH_OMAP1 select ARCH_OMAP select CLKSRC_MMIO select FORCE_PCI if PCCARD + select GENERIC_IRQ_CHIP select GPIOLIB help Support for older TI OMAP1 (omap7xx, omap15xx or omap16xx) diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S index 9466ae61f56a..b45c68d88275 100644 --- a/arch/arm/mach-shmobile/headsmp.S +++ b/arch/arm/mach-shmobile/headsmp.S @@ -136,6 +136,7 @@ ENDPROC(shmobile_smp_sleep) .long shmobile_smp_arg - 1b .bss + .align 2 .globl shmobile_smp_mpidr shmobile_smp_mpidr: .space NR_CPUS * 4 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index b0db85310331..16a7765511f8 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -27,6 +27,13 @@ #ifdef CONFIG_MMU +bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size) +{ + unsigned long addr = (unsigned long)unsafe_src; + + return addr >= TASK_SIZE && ULONG_MAX - addr >= size; +} + /* * This is useful to dump out the page tables associated with * 'addr' in mm 'mm'. @@ -552,6 +559,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) return; + pr_alert("8<--- cut here ---\n"); pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", inf->name, ifsr, addr); diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 448e57c6f653..4a833e89782a 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -84,8 +84,15 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start, unsigned long addr, end; unsigned long next; +#ifdef CONFIG_XIP_KERNEL + addr = (phys_addr_t)(text_start) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + + CONFIG_XIP_PHYS_ADDR; + end = (phys_addr_t)(text_end) - XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) + + CONFIG_XIP_PHYS_ADDR; +#else addr = virt_to_idmap(text_start); end = virt_to_idmap(text_end); +#endif pr_info("Setting up static identity map for 0x%lx - 0x%lx\n", addr, end); prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF; diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 2129070065c3..1c5aeba9bc27 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -23,6 +23,7 @@ */ #include #include +#include #include #include #include @@ -115,16 +116,40 @@ int ioremap_page(unsigned long virt, unsigned long phys, } EXPORT_SYMBOL(ioremap_page); +#ifdef CONFIG_KASAN +static unsigned long arm_kasan_mem_to_shadow(unsigned long addr) +{ + return (unsigned long)kasan_mem_to_shadow((void *)addr); +} +#else +static unsigned long arm_kasan_mem_to_shadow(unsigned long addr) +{ + return 0; +} +#endif + +static void memcpy_pgd(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ + end = ALIGN(end, PGDIR_SIZE); + memcpy(pgd_offset(mm, start), pgd_offset_k(start), + sizeof(pgd_t) * (pgd_index(end) - pgd_index(start))); +} + void __check_vmalloc_seq(struct mm_struct *mm) { int seq; do { - seq = atomic_read(&init_mm.context.vmalloc_seq); - memcpy(pgd_offset(mm, VMALLOC_START), - pgd_offset_k(VMALLOC_START), - sizeof(pgd_t) * 
(pgd_index(VMALLOC_END) - - pgd_index(VMALLOC_START))); + seq = atomic_read_acquire(&init_mm.context.vmalloc_seq); + memcpy_pgd(mm, VMALLOC_START, VMALLOC_END); + if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) { + unsigned long start = + arm_kasan_mem_to_shadow(VMALLOC_START); + unsigned long end = + arm_kasan_mem_to_shadow(VMALLOC_END); + memcpy_pgd(mm, start, end); + } /* * Use a store-release so that other CPUs that observe the * counter's new value are guaranteed to see the results of the diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 463fc2a8448f..a39a7043f189 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -1401,18 +1401,6 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) create_mapping(&map); } - /* - * Map the kernel if it is XIP. - * It is always first in the modulearea. - */ -#ifdef CONFIG_XIP_KERNEL - map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); - map.virtual = MODULES_VADDR; - map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; - map.type = MT_ROM; - create_mapping(&map); -#endif - /* * Map the cache flushing regions. */ @@ -1602,12 +1590,27 @@ static void __init map_kernel(void) * This will only persist until we turn on proper memory management later on * and we remap the whole kernel with page granularity. */ +#ifdef CONFIG_XIP_KERNEL + phys_addr_t kernel_nx_start = kernel_sec_start; +#else phys_addr_t kernel_x_start = kernel_sec_start; phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE); phys_addr_t kernel_nx_start = kernel_x_end; +#endif phys_addr_t kernel_nx_end = kernel_sec_end; struct map_desc map; + /* + * Map the kernel if it is XIP. + * It is always first in the modulearea. + */ +#ifdef CONFIG_XIP_KERNEL + map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); + map.virtual = MODULES_VADDR; + map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; + map.type = MT_ROM; + create_mapping(&map); +#else map.pfn = __phys_to_pfn(kernel_x_start); map.virtual = __phys_to_virt(kernel_x_start); map.length = kernel_x_end - kernel_x_start; @@ -1617,7 +1620,7 @@ static void __init map_kernel(void) /* If the nx part is small it may end up covered by the tail of the RWX section */ if (kernel_x_end == kernel_nx_end) return; - +#endif map.pfn = __phys_to_pfn(kernel_nx_start); map.virtual = __phys_to_virt(kernel_nx_start); map.length = kernel_nx_end - kernel_nx_start; @@ -1762,6 +1765,11 @@ void __init paging_init(const struct machine_desc *mdesc) { void *zero_page; +#ifdef CONFIG_XIP_KERNEL + /* Store the kernel RW RAM region start/end in these variables */ + kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK; + kernel_sec_end = round_up(__pa(_end), SECTION_SIZE); +#endif pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n", kernel_sec_start, kernel_sec_end); diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi index 87847116ab6d..b0885a389951 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi @@ -202,6 +202,9 @@ interrupts = <7 5 IRQ_TYPE_EDGE_RISING>; /* PH5 */ vdd-supply = <&reg_dldo1>; vddio-supply = <&reg_dldo1>; + mount-matrix = "0", "1", "0", + "-1", "0", "0", + "0", "0", "1"; }; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts index 381d58cea092..c854c7e31051 100644 ---
a/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-beelink-gs1.dts @@ -151,28 +151,12 @@ vcc-pg-supply = <&reg_aldo1>; }; -&r_ir { - linux,rc-map-name = "rc-beelink-gs1"; - status = "okay"; -}; - -&r_pio { - /* - * FIXME: We can't add that supply for now since it would - * create a circular dependency between pinctrl, the regulator - * and the RSB Bus. - * - * vcc-pl-supply = <&reg_aldo1>; - */ - vcc-pm-supply = <&reg_aldo1>; -}; - -&r_rsb { +&r_i2c { status = "okay"; - axp805: pmic@745 { + axp805: pmic@36 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x745>; + reg = <0x36>; interrupt-parent = <&r_intc>; interrupts = ; interrupt-controller; @@ -290,6 +274,22 @@ }; }; +&r_ir { + linux,rc-map-name = "rc-beelink-gs1"; + status = "okay"; +}; + +&r_pio { + /* + * PL0 and PL1 are used for PMIC I2C + * don't enable the pl-supply else + * it will fail at boot + * + * vcc-pl-supply = <&reg_aldo1>; + */ + vcc-pm-supply = <&reg_aldo1>; +}; + &spdif { pinctrl-names = "default"; pinctrl-0 = <&spdif_tx_pin>; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts index 6fc65e8db220..8c476e089185 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi-3.dts @@ -175,16 +175,12 @@ vcc-pg-supply = <&reg_vcc_wifi_io>; }; -&r_ir { - status = "okay"; -}; - -&r_rsb { +&r_i2c { status = "okay"; - axp805: pmic@745 { + axp805: pmic@36 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x745>; + reg = <0x36>; interrupt-parent = <&r_intc>; interrupts = ; interrupt-controller; @@ -295,6 +291,10 @@ }; }; +&r_ir { + status = "okay"; +}; + &rtc { clocks = <&ext_osc32k>; }; diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi index 92745128fcfe..4ec4996592be 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-orangepi.dtsi @@ -112,20 +112,12 @@ vcc-pg-supply = <&reg_aldo1>; }; -&r_ir { - status = "okay"; -}; - -&r_pio { - vcc-pm-supply = <&reg_bldo3>; -}; - -&r_rsb { +&r_i2c { status = "okay"; - axp805: pmic@745 { + axp805: pmic@36 { compatible = "x-powers,axp805", "x-powers,axp806"; - reg = <0x745>; + reg = <0x36>; interrupt-parent = <&r_intc>; interrupts = ; interrupt-controller; @@ -240,6 +232,14 @@ }; }; +&r_ir { + status = "okay"; +}; + +&r_pio { + vcc-pm-supply = <&reg_bldo3>; +}; + &rtc { clocks = <&ext_osc32k>; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi index 7c3f5c54f040..d84ae7571d23 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin-dahlia.dtsi @@ -16,10 +16,10 @@ "Headphone Jack", "HPOUTR", "IN2L", "Line In Jack", "IN2R", "Line In Jack", - "Headphone Jack", "MICBIAS", - "IN1L", "Headphone Jack"; + "Microphone Jack", "MICBIAS", + "IN1L", "Microphone Jack"; simple-audio-card,widgets = - "Microphone", "Headphone Jack", + "Microphone", "Microphone Jack", "Headphone", "Headphone Jack", "Line", "Line In Jack"; diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi index 0dd2f79c4f20..fb75800e8999 100644 --- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi @@ -141,7 +141,20 @@ regulator-max-microvolt = <3300000>; regulator-min-microvolt
= <3300000>; regulator-name = "+V3.3_SD"; - startup-delay-us = <2000>; + startup-delay-us = <20000>; + }; + + reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc { + compatible = "regulator-gpio"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usdhc2_vsel>; + gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>; + regulator-max-microvolt = <3300000>; + regulator-min-microvolt = <1800000>; + states = <1800000 0x1>, + <3300000 0x0>; + regulator-name = "PMIC_USDHC_VSELECT"; + vin-supply = <&reg_nvcc_sd>; }; reserved-memory { @@ -262,7 +275,7 @@ "SODIMM_19", "", "", - "", + "PMIC_USDHC_VSELECT", "", "", "", @@ -788,6 +801,7 @@ pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>; pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_cd_sleep>; vmmc-supply = <&reg_usdhc2_vmmc>; + vqmmc-supply = <&reg_usdhc2_vqmmc>; }; &wdog1 { @@ -1210,13 +1224,17 @@ ; /* SODIMM 76 */ }; + pinctrl_usdhc2_vsel: usdhc2vselgrp { + fsl,pins = + ; /* PMIC_USDHC_VSELECT */ + }; + /* * Note: Due to ERR050080 we use discrete external on-module resistors pulling-up to the * on-module +V3.3_1.8_SD (LDO5) rail and explicitly disable the internal pull-ups here. */ pinctrl_usdhc2: usdhc2grp { fsl,pins = - , , /* SODIMM 78 */ , /* SODIMM 74 */ , /* SODIMM 80 */ @@ -1227,7 +1245,6 @@ pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { fsl,pins = - , , , , @@ -1238,7 +1255,6 @@ pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { fsl,pins = - , , , , @@ -1250,7 +1266,6 @@ /* Avoid backfeeding with removed card power */ pinctrl_usdhc2_sleep: usdhc2slpgrp { fsl,pins = - , , , , diff --git a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi index 7bd680a926ce..c63144f2456e 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-tqma8mpql.dtsi @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-or-later OR MIT /* - * Copyright 2021-2022 TQ-Systems GmbH - * Author: Alexander Stein + * Copyright 2021-2025 TQ-Systems GmbH , + * D-82229 Seefeld, Germany. + * Author: Alexander Stein */ #include "imx8mp.dtsi" @@ -23,15 +24,6 @@ regulator-max-microvolt = <3300000>; regulator-always-on; }; - - /* e-MMC IO, needed for HS modes */ - reg_vcc1v8: regulator-vcc1v8 { - compatible = "regulator-fixed"; - regulator-name = "VCC1V8"; - regulator-min-microvolt = <1800000>; - regulator-max-microvolt = <1800000>; - regulator-always-on; - }; }; &A53_0 { @@ -194,7 +186,7 @@ no-sd; no-sdio; vmmc-supply = <&reg_vcc3v3>; - vqmmc-supply = <&reg_vcc1v8>; + vqmmc-supply = <&buck5_reg>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi index 371144eb4018..3d25644965b3 100644 --- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi @@ -133,7 +133,7 @@ regulator-max-microvolt = <3300000>; regulator-min-microvolt = <3300000>; regulator-name = "+V3.3_SD"; - startup-delay-us = <2000>; + startup-delay-us = <20000>; }; reserved-memory { diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi index bdcd35cecad9..fd6230352f4f 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173-elm-hana.dtsi @@ -43,6 +43,14 @@ interrupts = <117 IRQ_TYPE_LEVEL_LOW>; reg = <0x2c>; hid-descr-addr = <0x0020>; + /* + * The trackpad needs a post-power-on delay of 100ms, + * but at time of writing, the power supply for it on + * this board is always on.
The delay is therefore not + * added to avoid impacting the readiness of the + * trackpad. + */ + vdd-supply = <&mt6397_vgp6_reg>; wakeup-source; }; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi index e21feb85d822..1135ed0bf90c 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173-elm.dtsi @@ -922,7 +922,7 @@ interrupt-controller; #interrupt-cells = <2>; - clock: mt6397clock { + clock: clocks { compatible = "mediatek,mt6397-clk"; #clock-cells = <1>; }; @@ -934,11 +934,10 @@ #gpio-cells = <2>; }; - regulator: mt6397regulator { + regulators { compatible = "mediatek,mt6397-regulator"; mt6397_vpca15_reg: buck_vpca15 { - regulator-compatible = "buck_vpca15"; regulator-name = "vpca15"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -948,7 +947,6 @@ }; mt6397_vpca7_reg: buck_vpca7 { - regulator-compatible = "buck_vpca7"; regulator-name = "vpca7"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -958,7 +956,6 @@ }; mt6397_vsramca15_reg: buck_vsramca15 { - regulator-compatible = "buck_vsramca15"; regulator-name = "vsramca15"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -967,7 +964,6 @@ }; mt6397_vsramca7_reg: buck_vsramca7 { - regulator-compatible = "buck_vsramca7"; regulator-name = "vsramca7"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -976,7 +972,6 @@ }; mt6397_vcore_reg: buck_vcore { - regulator-compatible = "buck_vcore"; regulator-name = "vcore"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -985,7 +980,6 @@ }; mt6397_vgpu_reg: buck_vgpu { - regulator-compatible = "buck_vgpu"; regulator-name = "vgpu"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -994,7 +988,6 @@ }; mt6397_vdrm_reg: buck_vdrm { - regulator-compatible = "buck_vdrm"; regulator-name = "vdrm"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1400000>; @@ -1003,7 +996,6 @@ }; mt6397_vio18_reg: buck_vio18 { - regulator-compatible = "buck_vio18"; regulator-name = "vio18"; regulator-min-microvolt = <1620000>; regulator-max-microvolt = <1980000>; @@ -1012,18 +1004,15 @@ }; mt6397_vtcxo_reg: ldo_vtcxo { - regulator-compatible = "ldo_vtcxo"; regulator-name = "vtcxo"; regulator-always-on; }; mt6397_va28_reg: ldo_va28 { - regulator-compatible = "ldo_va28"; regulator-name = "va28"; }; mt6397_vcama_reg: ldo_vcama { - regulator-compatible = "ldo_vcama"; regulator-name = "vcama"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -1031,18 +1020,15 @@ }; mt6397_vio28_reg: ldo_vio28 { - regulator-compatible = "ldo_vio28"; regulator-name = "vio28"; regulator-always-on; }; mt6397_vusb_reg: ldo_vusb { - regulator-compatible = "ldo_vusb"; regulator-name = "vusb"; }; mt6397_vmc_reg: ldo_vmc { - regulator-compatible = "ldo_vmc"; regulator-name = "vmc"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; @@ -1050,7 +1036,6 @@ }; mt6397_vmch_reg: ldo_vmch { - regulator-compatible = "ldo_vmch"; regulator-name = "vmch"; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3300000>; @@ -1058,7 +1043,6 @@ }; mt6397_vemc_3v3_reg: ldo_vemc3v3 { - regulator-compatible = "ldo_vemc3v3"; regulator-name = "vemc_3v3"; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3300000>; @@ -1066,7 +1050,6 @@ }; mt6397_vgp1_reg: ldo_vgp1 { - regulator-compatible = "ldo_vgp1"; regulator-name = "vcamd"; 
regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -1074,7 +1057,6 @@ }; mt6397_vgp2_reg: ldo_vgp2 { - regulator-compatible = "ldo_vgp2"; regulator-name = "vcamio"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; @@ -1082,7 +1064,6 @@ }; mt6397_vgp3_reg: ldo_vgp3 { - regulator-compatible = "ldo_vgp3"; regulator-name = "vcamaf"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; @@ -1090,7 +1071,6 @@ }; mt6397_vgp4_reg: ldo_vgp4 { - regulator-compatible = "ldo_vgp4"; regulator-name = "vgp4"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <3300000>; @@ -1098,7 +1078,6 @@ }; mt6397_vgp5_reg: ldo_vgp5 { - regulator-compatible = "ldo_vgp5"; regulator-name = "vgp5"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <3000000>; @@ -1106,7 +1085,6 @@ }; mt6397_vgp6_reg: ldo_vgp6 { - regulator-compatible = "ldo_vgp6"; regulator-name = "vgp6"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; @@ -1115,7 +1093,6 @@ }; mt6397_vibr_reg: ldo_vibr { - regulator-compatible = "ldo_vibr"; regulator-name = "vibr"; regulator-min-microvolt = <1300000>; regulator-max-microvolt = <3300000>; @@ -1123,7 +1100,7 @@ }; }; - rtc: mt6397rtc { + rtc: rtc { compatible = "mediatek,mt6397-rtc"; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts index 49c7185243cc..8bc3ea1a7fbc 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts +++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts @@ -307,11 +307,10 @@ interrupt-controller; #interrupt-cells = <2>; - mt6397regulator: mt6397regulator { + regulators { compatible = "mediatek,mt6397-regulator"; mt6397_vpca15_reg: buck_vpca15 { - regulator-compatible = "buck_vpca15"; regulator-name = "vpca15"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -320,7 +319,6 @@ }; mt6397_vpca7_reg: buck_vpca7 { - regulator-compatible = "buck_vpca7"; regulator-name = "vpca7"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -329,7 +327,6 @@ }; mt6397_vsramca15_reg: buck_vsramca15 { - regulator-compatible = "buck_vsramca15"; regulator-name = "vsramca15"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -338,7 +335,6 @@ }; mt6397_vsramca7_reg: buck_vsramca7 { - regulator-compatible = "buck_vsramca7"; regulator-name = "vsramca7"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -347,7 +343,6 @@ }; mt6397_vcore_reg: buck_vcore { - regulator-compatible = "buck_vcore"; regulator-name = "vcore"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -356,7 +351,6 @@ }; mt6397_vgpu_reg: buck_vgpu { - regulator-compatible = "buck_vgpu"; regulator-name = "vgpu"; regulator-min-microvolt = < 700000>; regulator-max-microvolt = <1350000>; @@ -365,7 +359,6 @@ }; mt6397_vdrm_reg: buck_vdrm { - regulator-compatible = "buck_vdrm"; regulator-name = "vdrm"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <1400000>; @@ -374,7 +367,6 @@ }; mt6397_vio18_reg: buck_vio18 { - regulator-compatible = "buck_vio18"; regulator-name = "vio18"; regulator-min-microvolt = <1620000>; regulator-max-microvolt = <1980000>; @@ -383,19 +375,16 @@ }; mt6397_vtcxo_reg: ldo_vtcxo { - regulator-compatible = "ldo_vtcxo"; regulator-name = "vtcxo"; regulator-always-on; }; mt6397_va28_reg: ldo_va28 { - regulator-compatible = "ldo_va28"; regulator-name = "va28"; regulator-always-on; }; mt6397_vcama_reg: ldo_vcama { - 
regulator-compatible = "ldo_vcama"; regulator-name = "vcama"; regulator-min-microvolt = <1500000>; regulator-max-microvolt = <2800000>; @@ -403,18 +392,15 @@ }; mt6397_vio28_reg: ldo_vio28 { - regulator-compatible = "ldo_vio28"; regulator-name = "vio28"; regulator-always-on; }; mt6397_vusb_reg: ldo_vusb { - regulator-compatible = "ldo_vusb"; regulator-name = "vusb"; }; mt6397_vmc_reg: ldo_vmc { - regulator-compatible = "ldo_vmc"; regulator-name = "vmc"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; @@ -422,7 +408,6 @@ }; mt6397_vmch_reg: ldo_vmch { - regulator-compatible = "ldo_vmch"; regulator-name = "vmch"; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3300000>; @@ -430,7 +415,6 @@ }; mt6397_vemc_3v3_reg: ldo_vemc3v3 { - regulator-compatible = "ldo_vemc3v3"; regulator-name = "vemc_3v3"; regulator-min-microvolt = <3000000>; regulator-max-microvolt = <3300000>; @@ -438,7 +422,6 @@ }; mt6397_vgp1_reg: ldo_vgp1 { - regulator-compatible = "ldo_vgp1"; regulator-name = "vcamd"; regulator-min-microvolt = <1220000>; regulator-max-microvolt = <3300000>; @@ -446,7 +429,6 @@ }; mt6397_vgp2_reg: ldo_vgp2 { - regulator-compatible = "ldo_vgp2"; regulator-name = "vcamio"; regulator-min-microvolt = <1000000>; regulator-max-microvolt = <3300000>; @@ -454,7 +436,6 @@ }; mt6397_vgp3_reg: ldo_vgp3 { - regulator-compatible = "ldo_vgp3"; regulator-name = "vcamaf"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <3300000>; @@ -462,7 +443,6 @@ }; mt6397_vgp4_reg: ldo_vgp4 { - regulator-compatible = "ldo_vgp4"; regulator-name = "vgp4"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <3300000>; @@ -470,7 +450,6 @@ }; mt6397_vgp5_reg: ldo_vgp5 { - regulator-compatible = "ldo_vgp5"; regulator-name = "vgp5"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <3000000>; @@ -478,7 +457,6 @@ }; mt6397_vgp6_reg: ldo_vgp6 { - regulator-compatible = "ldo_vgp6"; regulator-name = "vgp6"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <3300000>; @@ -486,7 +464,6 @@ }; mt6397_vibr_reg: ldo_vibr { - regulator-compatible = "ldo_vibr"; regulator-name = "vibr"; regulator-min-microvolt = <1300000>; regulator-max-microvolt = <3300000>; diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi index 7640b5158ff9..256df3a2d823 100644 --- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi @@ -1247,8 +1247,7 @@ }; pwm0: pwm@1401e000 { - compatible = "mediatek,mt8173-disp-pwm", - "mediatek,mt6595-disp-pwm"; + compatible = "mediatek,mt8173-disp-pwm"; reg = <0 0x1401e000 0 0x1000>; #pwm-cells = <2>; clocks = <&mmsys CLK_MM_DISP_PWM026M>, @@ -1258,8 +1257,7 @@ }; pwm1: pwm@1401f000 { - compatible = "mediatek,mt8173-disp-pwm", - "mediatek,mt6595-disp-pwm"; + compatible = "mediatek,mt8173-disp-pwm"; reg = <0 0x1401f000 0 0x1000>; #pwm-cells = <2>; clocks = <&mmsys CLK_MM_DISP_PWM126M>, diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts index 1a2ec0787d3c..09c5dca12fb0 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-burnet.dts @@ -29,3 +29,6 @@ }; }; +&i2c2 { + i2c-scl-internal-delay-ns = <4100>; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts index 072133fb0f01..47905f84bc16 100644 --- 
a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-cozmo.dts @@ -17,6 +17,8 @@ }; &i2c2 { + i2c-scl-internal-delay-ns = <25000>; + trackpad@2c { compatible = "hid-over-i2c"; reg = <0x2c>; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts index 0eca3ff8672a..ca4196870f9d 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-damu.dts @@ -26,7 +26,14 @@ hid-descr-addr = <0x0001>; }; +&mt6358codec { + mediatek,dmic-mode = <1>; /* one-wire */ +}; + &qca_wifi { qcom,ath10k-calibration-variant = "GO_DAMU"; }; +&i2c2 { + i2c-scl-internal-delay-ns = <20000>; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi index bbe6c338f465..f9c1ec366b26 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-fennel.dtsi @@ -25,3 +25,6 @@ }; }; +&i2c2 { + i2c-scl-internal-delay-ns = <21500>; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts index 8fa89db03e63..328294245a79 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-kenzo.dts @@ -11,3 +11,18 @@ model = "Google kenzo sku17 board"; compatible = "google,juniper-sku17", "google,juniper", "mediatek,mt8183"; }; + +&i2c0 { + touchscreen@40 { + compatible = "hid-over-i2c"; + reg = <0x40>; + + pinctrl-names = "default"; + pinctrl-0 = <&touchscreen_pins>; + + interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>; + + post-power-on-delay-ms = <70>; + hid-descr-addr = <0x0001>; + }; +}; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi index 76d33540166f..c942e461a177 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi-willow.dtsi @@ -6,6 +6,21 @@ /dts-v1/; #include "mt8183-kukui-jacuzzi.dtsi" +&i2c0 { + touchscreen@40 { + compatible = "hid-over-i2c"; + reg = <0x40>; + + pinctrl-names = "default"; + pinctrl-0 = <&touchscreen_pins>; + + interrupts-extended = <&pio 155 IRQ_TYPE_LEVEL_LOW>; + + post-power-on-delay-ms = <70>; + hid-descr-addr = <0x0001>; + }; +}; + &i2c2 { trackpad@2c { compatible = "hid-over-i2c"; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi index d7fc924a9d0e..8e0575f8c1b2 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-jacuzzi.dtsi @@ -8,47 +8,37 @@ #include / { - panel: panel { - compatible = "auo,b116xw03"; - power-supply = <&pp3300_panel>; - backlight = <&backlight_lcd0>; - - port { - panel_in: endpoint { - remote-endpoint = <&anx7625_out>; - }; - }; - }; - - pp1200_mipibrdg: pp1200-mipibrdg { + pp1000_mipibrdg: pp1000-mipibrdg { compatible = "regulator-fixed"; - regulator-name = "pp1200_mipibrdg"; + regulator-name = "pp1000_mipibrdg"; + regulator-min-microvolt = <1000000>; + regulator-max-microvolt = <1000000>; pinctrl-names = "default"; - pinctrl-0 = <&pp1200_mipibrdg_en>; + pinctrl-0 = <&pp1000_mipibrdg_en>; enable-active-high; regulator-boot-on; gpio = <&pio 54 GPIO_ACTIVE_HIGH>; + 
vin-supply = <&pp1800_alw>; }; pp1800_mipibrdg: pp1800-mipibrdg { compatible = "regulator-fixed"; regulator-name = "pp1800_mipibrdg"; pinctrl-names = "default"; - pinctrl-0 = <&pp1800_lcd_en>; + pinctrl-0 = <&pp1800_mipibrdg_en>; enable-active-high; regulator-boot-on; gpio = <&pio 36 GPIO_ACTIVE_HIGH>; + vin-supply = <&pp1800_alw>; }; pp3300_panel: pp3300-panel { compatible = "regulator-fixed"; regulator-name = "pp3300_panel"; - regulator-min-microvolt = <3300000>; - regulator-max-microvolt = <3300000>; pinctrl-names = "default"; pinctrl-0 = <&pp3300_panel_pins>; @@ -56,18 +46,20 @@ regulator-boot-on; gpio = <&pio 35 GPIO_ACTIVE_HIGH>; + vin-supply = <&pp3300_alw>; }; - vddio_mipibrdg: vddio-mipibrdg { + pp3300_mipibrdg: pp3300-mipibrdg { compatible = "regulator-fixed"; - regulator-name = "vddio_mipibrdg"; + regulator-name = "pp3300_mipibrdg"; pinctrl-names = "default"; - pinctrl-0 = <&vddio_mipibrdg_en>; + pinctrl-0 = <&pp3300_mipibrdg_en>; enable-active-high; regulator-boot-on; gpio = <&pio 37 GPIO_ACTIVE_HIGH>; + vin-supply = <&pp3300_alw>; }; volume_buttons: volume-buttons { @@ -164,9 +156,9 @@ panel_flags = <1>; enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>; reset-gpios = <&pio 73 GPIO_ACTIVE_HIGH>; - vdd10-supply = <&pp1200_mipibrdg>; + vdd10-supply = <&pp1000_mipibrdg>; vdd18-supply = <&pp1800_mipibrdg>; - vdd33-supply = <&vddio_mipibrdg>; + vdd33-supply = <&pp3300_mipibrdg>; ports { #address-cells = <1>; @@ -188,6 +180,20 @@ }; }; }; + + aux-bus { + panel: panel { + compatible = "edp-panel"; + power-supply = <&pp3300_panel>; + backlight = <&backlight_lcd0>; + + port { + panel_in: endpoint { + remote-endpoint = <&anx7625_out>; + }; + }; + }; + }; }; }; @@ -395,14 +401,14 @@ "", ""; - pp1200_mipibrdg_en: pp1200-mipibrdg-en { + pp1000_mipibrdg_en: pp1000-mipibrdg-en { pins1 { pinmux = ; output-low; }; }; - pp1800_lcd_en: pp1800-lcd-en { + pp1800_mipibrdg_en: pp1800-mipibrdg-en { pins1 { pinmux = ; output-low; @@ -464,7 +470,7 @@ }; }; - vddio_mipibrdg_en: vddio-mipibrdg-en { + pp3300_mipibrdg_en: pp3300-mipibrdg-en { pins1 { pinmux = ; output-low; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi index 0d3c7b8162ff..9eca1c80fe01 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kakadu.dtsi @@ -105,9 +105,9 @@ clock-frequency = <400000>; vbus-supply = <&mt6358_vcn18_reg>; - eeprom@54 { + eeprom@50 { compatible = "atmel,24c32"; - reg = <0x54>; + reg = <0x50>; pagesize = <32>; vcc-supply = <&mt6358_vcn18_reg>; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi index e73113cb51f5..29216ebe4de8 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-kodama.dtsi @@ -80,9 +80,9 @@ clock-frequency = <400000>; vbus-supply = <&mt6358_vcn18_reg>; - eeprom@54 { + eeprom@50 { compatible = "atmel,24c64"; - reg = <0x54>; + reg = <0x50>; pagesize = <32>; vcc-supply = <&mt6358_vcn18_reg>; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi index 181da69d18f4..b0469a95ddc4 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui-krane.dtsi @@ -89,9 +89,9 @@ clock-frequency = <400000>; vbus-supply = <&mt6358_vcn18_reg>; - eeprom@54 { + eeprom@50 { compatible = "atmel,24c32"; - reg = <0x54>; + reg = <0x50>; 
pagesize = <32>; vcc-supply = <&mt6358_vcn18_reg>; }; diff --git a/arch/arm64/boot/dts/mediatek/mt8183.dtsi b/arch/arm64/boot/dts/mediatek/mt8183.dtsi index 2147e152683b..fe4632744f6e 100644 --- a/arch/arm64/boot/dts/mediatek/mt8183.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8183.dtsi @@ -1753,6 +1753,7 @@ resets = <&mmsys MT8183_MMSYS_SW0_RST_B_DISP_DSI0>; phys = <&mipi_tx0>; phy-names = "dphy"; + status = "disabled"; }; mutex: mutex@14016000 { diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi index 0814ed6a7272..7e5230581a1c 100644 --- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi @@ -901,7 +901,6 @@ regulators { mt6315_6_vbuck1: vbuck1 { - regulator-compatible = "vbuck1"; regulator-name = "Vbcpu"; regulator-min-microvolt = <400000>; regulator-max-microvolt = <1193750>; @@ -911,7 +910,6 @@ }; mt6315_6_vbuck3: vbuck3 { - regulator-compatible = "vbuck3"; regulator-name = "Vlcpu"; regulator-min-microvolt = <400000>; regulator-max-microvolt = <1193750>; @@ -928,7 +926,6 @@ regulators { mt6315_7_vbuck1: vbuck1 { - regulator-compatible = "vbuck1"; regulator-name = "Vgpu"; regulator-min-microvolt = <400000>; regulator-max-microvolt = <800000>; diff --git a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi index 39e4f6a560f9..e4861c6cd78e 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195-cherry.dtsi @@ -843,7 +843,6 @@ regulators { mt6315_6_vbuck1: vbuck1 { - regulator-compatible = "vbuck1"; regulator-name = "Vbcpu"; regulator-min-microvolt = <400000>; regulator-max-microvolt = <1193750>; @@ -861,7 +860,6 @@ regulators { mt6315_7_vbuck1: vbuck1 { - regulator-compatible = "vbuck1"; regulator-name = "Vgpu"; regulator-min-microvolt = <400000>; regulator-max-microvolt = <1193750>; @@ -906,6 +904,7 @@ vusb33-supply = <&mt6359_vusb_ldo_reg>; vbus-supply = <&usb_vbus>; + mediatek,u3p-dis-msk = <1>; }; &xhci2 { @@ -922,7 +921,6 @@ usb2-lpm-disable; vusb33-supply = <&mt6359_vusb_ldo_reg>; vbus-supply = <&usb_vbus>; - mediatek,u3p-dis-msk = <1>; }; #include diff --git a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts index 998c2e78168a..4e1803ab9963 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195-demo.dts +++ b/arch/arm64/boot/dts/mediatek/mt8195-demo.dts @@ -120,7 +120,6 @@ richtek,vinovp-microvolt = <14500000>; otg_vbus_regulator: usb-otg-vbus-regulator { - regulator-compatible = "usb-otg-vbus"; regulator-name = "usb-otg-vbus"; regulator-min-microvolt = <4425000>; regulator-max-microvolt = <5825000>; @@ -132,7 +131,6 @@ LDO_VIN3-supply = <&mt6360_buck2>; mt6360_buck1: buck1 { - regulator-compatible = "BUCK1"; regulator-name = "mt6360,buck1"; regulator-min-microvolt = <300000>; regulator-max-microvolt = <1300000>; @@ -143,7 +141,6 @@ }; mt6360_buck2: buck2 { - regulator-compatible = "BUCK2"; regulator-name = "mt6360,buck2"; regulator-min-microvolt = <300000>; regulator-max-microvolt = <1300000>; @@ -154,7 +151,6 @@ }; mt6360_ldo1: ldo1 { - regulator-compatible = "LDO1"; regulator-name = "mt6360,ldo1"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <3600000>; @@ -163,7 +159,6 @@ }; mt6360_ldo2: ldo2 { - regulator-compatible = "LDO2"; regulator-name = "mt6360,ldo2"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <3600000>; @@ -172,7 +167,6 @@ }; mt6360_ldo3: ldo3 { - regulator-compatible = "LDO3"; regulator-name 
= "mt6360,ldo3"; regulator-min-microvolt = <1200000>; regulator-max-microvolt = <3600000>; @@ -181,7 +175,6 @@ }; mt6360_ldo5: ldo5 { - regulator-compatible = "LDO5"; regulator-name = "mt6360,ldo5"; regulator-min-microvolt = <2700000>; regulator-max-microvolt = <3600000>; @@ -190,7 +183,6 @@ }; mt6360_ldo6: ldo6 { - regulator-compatible = "LDO6"; regulator-name = "mt6360,ldo6"; regulator-min-microvolt = <500000>; regulator-max-microvolt = <2100000>; @@ -199,7 +191,6 @@ }; mt6360_ldo7: ldo7 { - regulator-compatible = "LDO7"; regulator-name = "mt6360,ldo7"; regulator-min-microvolt = <500000>; regulator-max-microvolt = <2100000>; diff --git a/arch/arm64/boot/dts/mediatek/mt8195.dtsi b/arch/arm64/boot/dts/mediatek/mt8195.dtsi index bdf002e9cece..274edce5d5e6 100644 --- a/arch/arm64/boot/dts/mediatek/mt8195.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8195.dtsi @@ -332,7 +332,7 @@ }; infracfg_ao: syscon@10001000 { - compatible = "mediatek,mt8195-infracfg_ao", "syscon", "simple-mfd"; + compatible = "mediatek,mt8195-infracfg_ao", "syscon"; reg = <0 0x10001000 0 0x1000>; #clock-cells = <1>; #reset-cells = <1>; @@ -2000,7 +2000,7 @@ }; ovl0: ovl@1c000000 { - compatible = "mediatek,mt8195-disp-ovl", "mediatek,mt8183-disp-ovl"; + compatible = "mediatek,mt8195-disp-ovl"; reg = <0 0x1c000000 0 0x1000>; interrupts = ; power-domains = <&spm MT8195_POWER_DOMAIN_VDOSYS0>; diff --git a/arch/arm64/boot/dts/mediatek/mt8516.dtsi b/arch/arm64/boot/dts/mediatek/mt8516.dtsi index d1b67c82d761..5655f12723f1 100644 --- a/arch/arm64/boot/dts/mediatek/mt8516.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt8516.dtsi @@ -144,10 +144,10 @@ #size-cells = <2>; ranges; - /* 128 KiB reserved for ARM Trusted Firmware (BL31) */ + /* 192 KiB reserved for ARM Trusted Firmware (BL31) */ bl31_secmon_reserved: secmon@43000000 { no-map; - reg = <0 0x43000000 0 0x20000>; + reg = <0 0x43000000 0 0x30000>; }; }; @@ -206,7 +206,7 @@ compatible = "mediatek,mt8516-wdt", "mediatek,mt6589-wdt"; reg = <0 0x10007000 0 0x1000>; - interrupts = ; + interrupts = ; #reset-cells = <1>; }; @@ -269,7 +269,7 @@ interrupt-parent = <&gic>; interrupt-controller; reg = <0 0x10310000 0 0x1000>, - <0 0x10320000 0 0x1000>, + <0 0x1032f000 0 0x2000>, <0 0x10340000 0 0x2000>, <0 0x10360000 0 0x2000>; interrupts = , <0 0x11000180 0 0x80>; interrupts = ; + clock-div = <2>; clocks = <&topckgen CLK_TOP_I2C0>, <&topckgen CLK_TOP_APDMA>; clock-names = "main", "dma"; @@ -359,6 +360,7 @@ reg = <0 0x1100a000 0 0x90>, <0 0x11000200 0 0x80>; interrupts = ; + clock-div = <2>; clocks = <&topckgen CLK_TOP_I2C1>, <&topckgen CLK_TOP_APDMA>; clock-names = "main", "dma"; @@ -373,6 +375,7 @@ reg = <0 0x1100b000 0 0x90>, <0 0x11000280 0 0x80>; interrupts = ; + clock-div = <2>; clocks = <&topckgen CLK_TOP_I2C2>, <&topckgen CLK_TOP_APDMA>; clock-names = "main", "dma"; diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi index ec8dfb3d1c6d..a356db5fcc5f 100644 --- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi +++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi @@ -47,7 +47,6 @@ }; &i2c0 { - clock-div = <2>; pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins_a>; status = "okay"; @@ -156,7 +155,6 @@ }; &i2c2 { - clock-div = <2>; pinctrl-names = "default"; pinctrl-0 = <&i2c2_pins_a>; status = "okay"; diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi index 634373a423ef..481a88d83a65 100644 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi +++ 
b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi @@ -1631,7 +1631,7 @@ regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-always-on; - gpio = <&exp1 14 GPIO_ACTIVE_HIGH>; + gpio = <&exp1 9 GPIO_ACTIVE_HIGH>; enable-active-high; vin-supply = <&vdd_1v8>; }; diff --git a/arch/arm64/boot/dts/nvidia/tegra234.dtsi b/arch/arm64/boot/dts/nvidia/tegra234.dtsi index 6598e9ac52b8..876c73a00a54 100644 --- a/arch/arm64/boot/dts/nvidia/tegra234.dtsi +++ b/arch/arm64/boot/dts/nvidia/tegra234.dtsi @@ -1249,7 +1249,7 @@ compatible = "nvidia,tegra234-sce-fabric"; reg = <0xb600000 0x40000>; interrupts = ; - status = "okay"; + status = "disabled"; }; rce-fabric@be00000 { @@ -1558,7 +1558,7 @@ }; dce-fabric@de00000 { - compatible = "nvidia,tegra234-sce-fabric"; + compatible = "nvidia,tegra234-dce-fabric"; reg = <0xde00000 0x40000>; interrupts = ; status = "okay"; @@ -1574,6 +1574,8 @@ #redistributor-regions = <1>; #interrupt-cells = <3>; interrupt-controller; + + #address-cells = <0>; }; smmu_iso: iommu@10000000{ diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi index 987cebbda057..571ed1abdad4 100644 --- a/arch/arm64/boot/dts/qcom/msm8916.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi @@ -109,7 +109,7 @@ sleep_clk: sleep-clk { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <32768>; + clock-frequency = <32764>; }; }; diff --git a/arch/arm64/boot/dts/qcom/msm8994.dtsi b/arch/arm64/boot/dts/qcom/msm8994.dtsi index 3c6c2cf99fb9..0eb8eca13ad9 100644 --- a/arch/arm64/boot/dts/qcom/msm8994.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8994.dtsi @@ -33,7 +33,7 @@ sleep_clk: sleep-clk { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <32768>; + clock-frequency = <32764>; clock-output-names = "sleep_clk"; }; }; @@ -434,6 +434,15 @@ #size-cells = <1>; ranges; + interrupts = , + , + , + ; + interrupt-names = "pwr_event", + "qusb2_phy", + "hs_phy_irq", + "ss_phy_irq"; + clocks = <&gcc GCC_USB30_MASTER_CLK>, <&gcc GCC_SYS_NOC_USB3_AXI_CLK>, <&gcc GCC_USB30_SLEEP_CLK>, diff --git a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts index 3bbafb68ba5c..543282fe2abb 100644 --- a/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts +++ b/arch/arm64/boot/dts/qcom/msm8996-xiaomi-gemini.dts @@ -65,7 +65,7 @@ }; led@1 { - reg = <0>; + reg = <1>; chan-name = "button-backlight1"; led-cur = /bits/ 8 <0x32>; max-cur = /bits/ 8 <0xC8>; diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 3b9a4bf89701..b3ebd0298f64 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi @@ -2968,9 +2968,14 @@ #size-cells = <1>; ranges; - interrupts = , + interrupts = , + , + , ; - interrupt-names = "hs_phy_irq", "ss_phy_irq"; + interrupt-names = "pwr_event", + "qusb2_phy", + "hs_phy_irq", + "ss_phy_irq"; clocks = <&gcc GCC_SYS_NOC_USB3_AXI_CLK>, <&gcc GCC_USB30_MASTER_CLK>, diff --git a/arch/arm64/boot/dts/qcom/pm6150.dtsi b/arch/arm64/boot/dts/qcom/pm6150.dtsi index 8a4972e6a24c..b45ffd7d3b36 100644 --- a/arch/arm64/boot/dts/qcom/pm6150.dtsi +++ b/arch/arm64/boot/dts/qcom/pm6150.dtsi @@ -11,7 +11,7 @@ thermal-zones { pm6150_thermal: pm6150-thermal { polling-delay-passive = <100>; - polling-delay = <0>; + thermal-sensors = <&pm6150_temp>; trips { diff --git a/arch/arm64/boot/dts/qcom/pm6150l.dtsi b/arch/arm64/boot/dts/qcom/pm6150l.dtsi index 06d729ff65a9..e7526a7f41e2 100644 --- 
a/arch/arm64/boot/dts/qcom/pm6150l.dtsi +++ b/arch/arm64/boot/dts/qcom/pm6150l.dtsi @@ -5,6 +5,34 @@ #include #include +/ { + thermal-zones { + pm6150l-thermal { + thermal-sensors = <&pm6150l_temp>; + + trips { + trip0 { + temperature = <95000>; + hysteresis = <0>; + type = "passive"; + }; + + trip1 { + temperature = <115000>; + hysteresis = <0>; + type = "hot"; + }; + + trip2 { + temperature = <125000>; + hysteresis = <0>; + type = "critical"; + }; + }; + }; + }; +}; + &spmi_bus { pm6150l_lsid4: pmic@4 { compatible = "qcom,pm6150l", "qcom,spmi-pmic"; @@ -12,6 +40,13 @@ #address-cells = <1>; #size-cells = <0>; + pm6150l_temp: temp-alarm@2400 { + compatible = "qcom,spmi-temp-alarm"; + reg = <0x2400>; + interrupts = <0x4 0x24 0x0 IRQ_TYPE_EDGE_BOTH>; + #thermal-sensor-cells = <0>; + }; + pm6150l_adc: adc@3100 { compatible = "qcom,spmi-adc5"; reg = <0x3100>; diff --git a/arch/arm64/boot/dts/qcom/sc7180-idp.dts b/arch/arm64/boot/dts/qcom/sc7180-idp.dts index 9dee131b1e24..02b507691cc3 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-idp.dts +++ b/arch/arm64/boot/dts/qcom/sc7180-idp.dts @@ -306,14 +306,9 @@ reset-gpios = <&pm6150l_gpio 3 GPIO_ACTIVE_HIGH>; - ports { - #address-cells = <1>; - #size-cells = <0>; - port@0 { - reg = <0>; - panel0_in: endpoint { - remote-endpoint = <&dsi0_out>; - }; + port { + panel0_in: endpoint { + remote-endpoint = <&dsi0_out>; }; }; }; @@ -333,10 +328,6 @@ vdds-supply = <&vreg_l4a_0p8>; }; -&mdp { - status = "okay"; -}; - &mdss { status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi index 7ee407f7b6bb..f98162d3a081 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi @@ -26,7 +26,6 @@ thermal-zones { skin_temp_thermal: skin-temp-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&pm6150_adc_tm 1>; sustainable-power = <965>; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi index bfab67f4a7c9..a7b41498ba88 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-homestar.dtsi @@ -43,7 +43,6 @@ thermal-zones { skin_temp_thermal: skin-temp-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&pm6150_adc_tm 1>; sustainable-power = <965>; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi index a7582fb547ee..31f91f4e9736 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-pompom.dtsi @@ -12,14 +12,11 @@ / { thermal-zones { - 5v-choke-thermal { - polling-delay-passive = <0>; - polling-delay = <250>; - + choke-5v-thermal { thermal-sensors = <&pm6150_adc_tm 1>; trips { - 5v-choke-crit { + choke-5v-crit { temperature = <125000>; hysteresis = <1000>; type = "critical"; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi index 695b04fe7221..a2906126242c 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-quackingstick.dtsi @@ -60,19 +60,15 @@ pinctrl-names = "default"; pinctrl-0 = <&lcd_rst>; avdd-supply = <&ppvar_lcd>; + avee-supply = <&ppvar_lcd>; pp1800-supply = <&v1p8_disp>; pp3300-supply = <&pp3300_dx_edp>; backlight = <&backlight>; rotation = <270>; - ports { - 
#address-cells = <1>; - #size-cells = <0>; - port@0 { - reg = <0>; - panel_in: endpoint { - remote-endpoint = <&dsi0_out>; - }; + port { + panel_in: endpoint { + remote-endpoint = <&dsi0_out>; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi index 6312108e8b3e..ebb64b91c09f 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-wormdingler.dtsi @@ -50,7 +50,6 @@ thermal-zones { skin_temp_thermal: skin-temp-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&pm6150_adc_tm 1>; sustainable-power = <574>; @@ -124,14 +123,9 @@ backlight = <&backlight>; rotation = <270>; - ports { - #address-cells = <1>; - #size-cells = <0>; - port@0 { - reg = <0>; - panel_in: endpoint { - remote-endpoint = <&dsi0_out>; - }; + port { + panel_in: endpoint { + remote-endpoint = <&dsi0_out>; }; }; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi index f55ce6f2fdc2..06b6774ef010 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi @@ -20,9 +20,6 @@ / { thermal-zones { charger_thermal: charger-thermal { - polling-delay-passive = <0>; - polling-delay = <0>; - thermal-sensors = <&pm6150_adc_tm 0>; trips { @@ -777,6 +774,10 @@ hp_i2c: &i2c9 { }; }; +&lpasscc { + status = "okay"; +}; + &lpass_cpu { status = "okay"; @@ -802,7 +803,7 @@ hp_i2c: &i2c9 { }; }; -&mdp { +&lpass_hm { status = "okay"; }; diff --git a/arch/arm64/boot/dts/qcom/sc7180.dtsi b/arch/arm64/boot/dts/qcom/sc7180.dtsi index 13fe1c92bf35..a9f937b06847 100644 --- a/arch/arm64/boot/dts/qcom/sc7180.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180.dtsi @@ -2937,8 +2937,6 @@ interrupt-parent = <&mdss>; interrupts = <0>; - status = "disabled"; - ports { #address-cells = <1>; #size-cells = <0>; @@ -2985,7 +2983,8 @@ }; dsi0: dsi@ae94000 { - compatible = "qcom,mdss-dsi-ctrl"; + compatible = "qcom,sc7180-dsi-ctrl", + "qcom,mdss-dsi-ctrl"; reg = <0 0x0ae94000 0 0x400>; reg-names = "dsi_ctrl"; @@ -3576,6 +3575,8 @@ power-domains = <&lpass_hm LPASS_CORE_HM_GDSCR>; #clock-cells = <1>; #power-domain-cells = <1>; + + status = "reserved"; /* Controlled by ADSP */ }; lpass_cpu: lpass@62d87000 { @@ -3621,13 +3622,14 @@ clock-names = "iface", "bi_tcxo"; #clock-cells = <1>; #power-domain-cells = <1>; + + status = "reserved"; /* Controlled by ADSP */ }; }; thermal-zones { cpu0_thermal: cpu0-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 1>; sustainable-power = <1052>; @@ -3676,7 +3678,6 @@ cpu1_thermal: cpu1-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 2>; sustainable-power = <1052>; @@ -3725,7 +3726,6 @@ cpu2_thermal: cpu2-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 3>; sustainable-power = <1052>; @@ -3774,7 +3774,6 @@ cpu3_thermal: cpu3-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 4>; sustainable-power = <1052>; @@ -3823,7 +3822,6 @@ cpu4_thermal: cpu4-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 5>; sustainable-power = <1052>; @@ -3872,7 +3870,6 @@ cpu5_thermal: cpu5-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 6>; sustainable-power = <1052>; @@ -3921,7 +3918,6 @@ cpu6_thermal: cpu6-thermal { polling-delay-passive = 
<250>; - polling-delay = <0>; thermal-sensors = <&tsens0 9>; sustainable-power = <1425>; @@ -3962,7 +3958,6 @@ cpu7_thermal: cpu7-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 10>; sustainable-power = <1425>; @@ -4003,7 +3998,6 @@ cpu8_thermal: cpu8-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 11>; sustainable-power = <1425>; @@ -4044,7 +4038,6 @@ cpu9_thermal: cpu9-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 12>; sustainable-power = <1425>; @@ -4085,7 +4078,6 @@ aoss0-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 0>; @@ -4106,7 +4098,6 @@ cpuss0-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 7>; @@ -4126,7 +4117,6 @@ cpuss1-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 8>; @@ -4146,7 +4136,6 @@ gpuss0-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 13>; @@ -4174,7 +4163,6 @@ gpuss1-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens0 14>; @@ -4202,7 +4190,6 @@ aoss1-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens1 0>; @@ -4223,7 +4210,6 @@ cwlan-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens1 1>; @@ -4244,7 +4230,6 @@ audio-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens1 2>; @@ -4265,7 +4250,6 @@ ddr-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens1 3>; @@ -4286,7 +4270,6 @@ q6-hvx-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens1 4>; @@ -4307,7 +4290,6 @@ camera-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens1 5>; @@ -4328,7 +4310,6 @@ mdm-core-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens1 6>; @@ -4349,7 +4330,6 @@ mdm-dsp-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens1 7>; @@ -4370,7 +4350,6 @@ npu-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens1 8>; @@ -4391,7 +4370,6 @@ video-thermal { polling-delay-passive = <250>; - polling-delay = <0>; thermal-sensors = <&tsens1 9>; diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi index b5cd24d59ad9..b778728390e5 100644 --- a/arch/arm64/boot/dts/qcom/sc7280.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi @@ -79,7 +79,7 @@ sleep_clk: sleep-clk { compatible = "fixed-clock"; - clock-frequency = <32000>; + clock-frequency = <32764>; #clock-cells = <0>; }; }; diff --git a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi index 7e3aaf5de3f5..6b0d4bc6c541 100644 --- a/arch/arm64/boot/dts/qcom/sc8280xp.dtsi +++ b/arch/arm64/boot/dts/qcom/sc8280xp.dtsi @@ -1119,7 +1119,7 @@ remoteproc_adsp: remoteproc@3000000 { compatible = "qcom,sc8280xp-adsp-pas"; - reg = <0 0x03000000 0 0x100>; + reg = <0 0x03000000 0 0x10000>; interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>, <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>, @@ -1806,7 +1806,7 @@ remoteproc_nsp0: remoteproc@1b300000 { compatible = "qcom,sc8280xp-nsp0-pas"; - reg = <0 0x1b300000 0 0x100>; + reg = <0 0x1b300000 0 0x10000>; interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>, <&smp2p_nsp0_in 0 
IRQ_TYPE_EDGE_RISING>, @@ -1937,7 +1937,7 @@ remoteproc_nsp1: remoteproc@21300000 { compatible = "qcom,sc8280xp-nsp1-pas"; - reg = <0 0x21300000 0 0x100>; + reg = <0 0x21300000 0 0x10000>; interrupts-extended = <&intc GIC_SPI 887 IRQ_TYPE_LEVEL_HIGH>, <&smp2p_nsp1_in 0 IRQ_TYPE_EDGE_RISING>, diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi index 71644b9b8866..a5df310ce7f3 100644 --- a/arch/arm64/boot/dts/qcom/sdm845.dtsi +++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi @@ -4234,16 +4234,16 @@ "vfe1", "vfe_lite"; - interrupts = , - , - , - , - , - , - , - , - , - ; + interrupts = , + , + , + , + , + , + , + , + , + ; interrupt-names = "csid0", "csid1", "csid2", diff --git a/arch/arm64/boot/dts/qcom/sm6125.dtsi b/arch/arm64/boot/dts/qcom/sm6125.dtsi index 271247b37175..a3876a322baa 100644 --- a/arch/arm64/boot/dts/qcom/sm6125.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6125.dtsi @@ -27,7 +27,7 @@ sleep_clk: sleep-clk { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <32000>; + clock-frequency = <32764>; clock-output-names = "sleep_clk"; }; }; diff --git a/arch/arm64/boot/dts/qcom/sm6350.dtsi b/arch/arm64/boot/dts/qcom/sm6350.dtsi index ba078099b805..2fb751330e49 100644 --- a/arch/arm64/boot/dts/qcom/sm6350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm6350.dtsi @@ -855,7 +855,7 @@ adsp: remoteproc@3000000 { compatible = "qcom,sm6350-adsp-pas"; - reg = <0 0x03000000 0 0x100>; + reg = <0x0 0x03000000 0x0 0x10000>; interrupts-extended = <&pdc 6 IRQ_TYPE_LEVEL_HIGH>, <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>, @@ -923,7 +923,7 @@ mpss: remoteproc@4080000 { compatible = "qcom,sm6350-mpss-pas"; - reg = <0x0 0x04080000 0x0 0x4040>; + reg = <0x0 0x04080000 0x0 0x10000>; interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_EDGE_RISING>, <&modem_smp2p_in 0 IRQ_TYPE_EDGE_RISING>, diff --git a/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts b/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts index 30c94fd4fe61..be47e34da990 100644 --- a/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts +++ b/arch/arm64/boot/dts/qcom/sm7225-fairphone-fp4.dts @@ -20,7 +20,7 @@ chassis-type = "handset"; /* required for bootloader to select correct board */ - qcom,msm-id = <434 0x10000>, <459 0x10000>; + qcom,msm-id = <459 0x10000>; qcom,board-id = <8 32>; aliases { diff --git a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts index 5397fba9417b..51ddbac3cfe5 100644 --- a/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts +++ b/arch/arm64/boot/dts/qcom/sm8150-microsoft-surface-duo.dts @@ -376,8 +376,8 @@ pinctrl-0 = <&da7280_intr_default>; dlg,actuator-type = "LRA"; - dlg,dlg,const-op-mode = <1>; - dlg,dlg,periodic-op-mode = <1>; + dlg,const-op-mode = <1>; + dlg,periodic-op-mode = <1>; dlg,nom-microvolt = <2000000>; dlg,abs-max-microvolt = <2000000>; dlg,imax-microamp = <129000>; diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi index c9780b2afd2f..eb500cb67c86 100644 --- a/arch/arm64/boot/dts/qcom/sm8250.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi @@ -84,7 +84,7 @@ sleep_clk: sleep-clk { compatible = "fixed-clock"; - clock-frequency = <32768>; + clock-frequency = <32764>; #clock-cells = <0>; }; }; @@ -3291,20 +3291,20 @@ "vfe_lite0", "vfe_lite1"; - interrupts = , - , - , - , - , - , - , - , - , - , - , - , - , - ; + interrupts = , + , + , + , + , + , + , + , + , + , + , + , + , + ; interrupt-names = "csiphy0", "csiphy1", "csiphy2", diff --git 
a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi index 888bf4cd73c3..75292ec3ee77 100644 --- a/arch/arm64/boot/dts/qcom/sm8350.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi @@ -34,7 +34,7 @@ sleep_clk: sleep-clk { compatible = "fixed-clock"; - clock-frequency = <32000>; + clock-frequency = <32764>; #clock-cells = <0>; }; @@ -421,7 +421,7 @@ no-map; }; - pil_camera_mem: mmeory@85200000 { + pil_camera_mem: memory@85200000 { reg = <0x0 0x85200000 0x0 0x500000>; no-map; }; @@ -1641,7 +1641,7 @@ mpss: remoteproc@4080000 { compatible = "qcom,sm8350-mpss-pas"; - reg = <0x0 0x04080000 0x0 0x4040>; + reg = <0x0 0x04080000 0x0 0x10000>; interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_LEVEL_HIGH>, <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>, @@ -2226,7 +2226,7 @@ cdsp: remoteproc@98900000 { compatible = "qcom,sm8350-cdsp-pas"; - reg = <0 0x098900000 0 0x1400000>; + reg = <0 0x98900000 0 0x1400000>; interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>, <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>, diff --git a/arch/arm64/boot/dts/qcom/sm8450.dtsi b/arch/arm64/boot/dts/qcom/sm8450.dtsi index aa0977af9411..9420857871b1 100644 --- a/arch/arm64/boot/dts/qcom/sm8450.dtsi +++ b/arch/arm64/boot/dts/qcom/sm8450.dtsi @@ -33,7 +33,7 @@ sleep_clk: sleep-clk { compatible = "fixed-clock"; #clock-cells = <0>; - clock-frequency = <32000>; + clock-frequency = <32764>; }; }; @@ -2093,7 +2093,7 @@ remoteproc_adsp: remoteproc@30000000 { compatible = "qcom,sm8450-adsp-pas"; - reg = <0 0x030000000 0 0x100>; + reg = <0 0x30000000 0 0x100>; interrupts-extended = <&pdc 6 IRQ_TYPE_EDGE_RISING>, <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>, @@ -2159,7 +2159,7 @@ remoteproc_cdsp: remoteproc@32300000 { compatible = "qcom,sm8450-cdsp-pas"; - reg = <0 0x032300000 0 0x1400000>; + reg = <0 0x32300000 0 0x10000>; interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_EDGE_RISING>, <&smp2p_cdsp_in 0 IRQ_TYPE_EDGE_RISING>, @@ -2265,7 +2265,7 @@ remoteproc_mpss: remoteproc@4080000 { compatible = "qcom,sm8450-mpss-pas"; - reg = <0x0 0x04080000 0x0 0x4040>; + reg = <0x0 0x04080000 0x0 0x10000>; interrupts-extended = <&intc GIC_SPI 264 IRQ_TYPE_EDGE_RISING>, <&smp2p_modem_in 0 IRQ_TYPE_EDGE_RISING>, diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index d5ecb9e819dc..b0f1d2d61162 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi @@ -524,6 +524,7 @@ power-domain@RK3328_PD_HEVC { reg = ; + clocks = <&cru SCLK_VENC_CORE>; #power-domain-cells = <0>; }; power-domain@RK3328_PD_VIDEO { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts index fe5b52610010..6a6b36c36ce2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-nanopi-r4s.dts @@ -117,7 +117,7 @@ }; &u2phy1_host { - status = "disabled"; + phy-supply = <&vdd_5v>; }; &uart0 { diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index e089e0c26a72..a6bae0a45a6d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi @@ -147,7 +147,7 @@ snps,reset-active-low; snps,reset-delays-us = <0 10000 50000>; tx_delay = <0x10>; - rx_delay = <0x10>; + rx_delay = <0x23>; status = "okay"; }; diff --git a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi index eb8690a6be16..04222028e53e 100644 --- 
a/arch/arm64/boot/dts/ti/k3-am62-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62-main.dtsi @@ -23,7 +23,6 @@ interrupt-controller; reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ <0x00 0x01880000 0x00 0xc0000>, /* GICR */ - <0x00 0x01880000 0x00 0xc0000>, /* GICR */ <0x01 0x00000000 0x00 0x2000>, /* GICC */ <0x01 0x00010000 0x00 0x1000>, /* GICH */ <0x01 0x00020000 0x00 0x2000>; /* GICV */ diff --git a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi index 9301ea388802..4b349d73da21 100644 --- a/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am62a-main.dtsi @@ -18,7 +18,6 @@ compatible = "arm,gic-v3"; reg = <0x00 0x01800000 0x00 0x10000>, /* GICD */ <0x00 0x01880000 0x00 0xc0000>, /* GICR */ - <0x00 0x01880000 0x00 0xc0000>, /* GICR */ <0x01 0x00000000 0x00 0x2000>, /* GICC */ <0x01 0x00010000 0x00 0x1000>, /* GICH */ <0x01 0x00020000 0x00 0x2000>; /* GICV */ diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 8efc3302bf96..41612b03af63 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -75,11 +75,13 @@ #define ARM_CPU_PART_CORTEX_A76 0xD0B #define ARM_CPU_PART_NEOVERSE_N1 0xD0C #define ARM_CPU_PART_CORTEX_A77 0xD0D +#define ARM_CPU_PART_CORTEX_A76AE 0xD0E #define ARM_CPU_PART_NEOVERSE_V1 0xD40 #define ARM_CPU_PART_CORTEX_A78 0xD41 #define ARM_CPU_PART_CORTEX_A78AE 0xD42 #define ARM_CPU_PART_CORTEX_X1 0xD44 #define ARM_CPU_PART_CORTEX_A510 0xD46 +#define ARM_CPU_PART_CORTEX_X1C 0xD4C #define ARM_CPU_PART_CORTEX_A520 0xD80 #define ARM_CPU_PART_CORTEX_A710 0xD47 #define ARM_CPU_PART_CORTEX_A715 0xD4D @@ -119,6 +121,7 @@ #define QCOM_CPU_PART_KRYO 0x200 #define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800 #define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801 +#define QCOM_CPU_PART_KRYO_3XX_GOLD 0x802 #define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803 #define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804 #define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805 @@ -129,6 +132,7 @@ #define FUJITSU_CPU_PART_A64FX 0x001 #define HISI_CPU_PART_TSV110 0xD01 +#define HISI_CPU_PART_HIP09 0xD02 #define APPLE_CPU_PART_M1_ICESTORM 0x022 #define APPLE_CPU_PART_M1_FIRESTORM 0x023 @@ -151,11 +155,13 @@ #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77) +#define MIDR_CORTEX_A76AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76AE) #define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1) #define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78) #define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE) #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) +#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C) #define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520) #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) #define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715) @@ -188,6 +194,7 @@ #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO) #define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD) #define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, 
QCOM_CPU_PART_KRYO_2XX_SILVER) +#define MIDR_QCOM_KRYO_3XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_GOLD) #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER) #define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD) #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER) @@ -195,6 +202,7 @@ #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) +#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09) #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) #define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO) diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index 930b0e6c9462..7622782d0bb9 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -56,11 +56,13 @@ extern void fpsimd_signal_preserve_current_state(void); extern void fpsimd_preserve_current_state(void); extern void fpsimd_restore_current_state(void); extern void fpsimd_update_current_state(struct user_fpsimd_state const *state); +extern void fpsimd_kvm_prepare(void); extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state, void *sve_state, unsigned int sve_vl, void *za_state, unsigned int sme_vl, - u64 *svcr); + u64 *svcr, enum fp_type *type, + enum fp_type to_save); extern void fpsimd_flush_task_state(struct task_struct *target); extern void fpsimd_save_and_flush_cpu_state(void); diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index 834bff720582..0075dc5d8238 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -619,6 +619,7 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result, } #endif u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type); +u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type); s32 aarch64_get_branch_offset(u32 insn); u32 aarch64_set_branch_offset(u32 insn, s32 offset); diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 577cf444c113..0935f9849510 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -67,6 +67,7 @@ enum kvm_mode kvm_get_mode(void); DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); extern unsigned int kvm_sve_max_vl; +extern unsigned int kvm_host_sve_max_vl; int kvm_arm_init_sve(void); u32 __attribute_const__ kvm_target_cpu(void); @@ -309,8 +310,18 @@ struct vcpu_reset_state { struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; - /* Guest floating point state */ + /* + * Guest floating point state + * + * The architecture has two main floating point extensions, + * the original FPSIMD and SVE. These have overlapping + * register views, with the FPSIMD V registers occupying the + * low 128 bits of the SVE Z registers. When the core + * floating point code saves the register state of a task it + * records which view it saved in fp_type. + */ void *sve_state; + enum fp_type fp_type; unsigned int sve_max_vl; u64 svcr; @@ -320,7 +331,6 @@ struct kvm_vcpu_arch { /* Values of trap registers for the guest. 
*/ u64 hcr_el2; u64 mdcr_el2; - u64 cptr_el2; /* Values of trap registers for the host before guest entry. */ u64 mdcr_el2_host; @@ -370,7 +380,6 @@ struct kvm_vcpu_arch { struct kvm_guest_debug_arch vcpu_debug_state; struct kvm_guest_debug_arch external_debug_state; - struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ struct task_struct *parent_task; struct { @@ -547,10 +556,6 @@ struct kvm_vcpu_arch { /* Save TRBE context if active */ #define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6)) -/* SVE enabled for host EL0 */ -#define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0)) -/* SME enabled for EL0 */ -#define HOST_SME_ENABLED __vcpu_single_flag(sflags, BIT(1)) /* Physical CPU not in supported_cpus */ #define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(2)) /* WFIT instruction trapped */ diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index aa7fa2a08f06..1d0bb7624a1c 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -122,5 +122,6 @@ extern u64 kvm_nvhe_sym(id_aa64isar2_el1_sys_val); extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val); extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val); extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val); +extern unsigned int kvm_nvhe_sym(kvm_host_sve_max_vl); #endif /* __ARM64_KVM_HYP_H__ */ diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h index 5966ee4a6154..101771c60d80 100644 --- a/arch/arm64/include/asm/mman.h +++ b/arch/arm64/include/asm/mman.h @@ -3,6 +3,8 @@ #define __ASM_MMAN_H__ #include +#include +#include #include #include @@ -21,19 +23,24 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot, } #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey) -static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags) +static inline unsigned long arch_calc_vm_flag_bits(struct file *file, + unsigned long flags) { /* * Only allow MTE on anonymous mappings as these are guaranteed to be * backed by tags-capable memory. The vm_flags may be overridden by a * filesystem supporting MTE (RAM-based). 
*/ - if (system_supports_mte() && (flags & MAP_ANONYMOUS)) - return VM_MTE_ALLOWED; + if (system_supports_mte()) { + if ((flags & MAP_ANONYMOUS) && !(flags & MAP_HUGETLB)) + return VM_MTE_ALLOWED; + if (shmem_file(file)) + return VM_MTE_ALLOWED; + } return 0; } -#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags) +#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags) static inline bool arch_validate_prot(unsigned long prot, unsigned long addr __always_unused) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 1d713cfb0af1..426c3cb3e3bb 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -677,7 +677,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) #define pud_none(pud) (!pud_val(pud)) -#define pud_bad(pud) (!pud_table(pud)) +#define pud_bad(pud) ((pud_val(pud) & PUD_TYPE_MASK) != \ + PUD_TYPE_TABLE) #define pud_present(pud) pte_present(pud_pte(pud)) #define pud_leaf(pud) (pud_present(pud) && !pud_table(pud)) #define pud_valid(pud) pte_valid(pud_pte(pud)) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 400f8956328b..1b822e618bb4 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -122,6 +122,12 @@ enum vec_type { ARM64_VEC_MAX, }; +enum fp_type { + FP_STATE_CURRENT, /* Save based on current task state. */ + FP_STATE_FPSIMD, + FP_STATE_SVE, +}; + struct cpu_context { unsigned long x19; unsigned long x20; @@ -152,6 +158,7 @@ struct thread_struct { struct user_fpsimd_state fpsimd_state; } uw; + enum fp_type fp_type; /* registers FPSIMD or SVE? */ unsigned int fpsimd_cpu; void *sve_state; /* SVE registers, if any */ void *za_state; /* ZA register, if any */ diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h index aa3d3607d5c8..9de17d4bb036 100644 --- a/arch/arm64/include/asm/spectre.h +++ b/arch/arm64/include/asm/spectre.h @@ -96,7 +96,9 @@ enum mitigation_state arm64_get_meltdown_state(void); enum mitigation_state arm64_get_spectre_bhb_state(void); bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); -u8 spectre_bhb_loop_affected(int scope); +extern bool __nospectre_bhb; +u8 get_spectre_bhb_loop_value(void); +bool is_spectre_bhb_fw_mitigated(void); void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); #endif /* __ASSEMBLY__ */ #endif /* __ASM_SPECTRE_H */ diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c index 97c42be71338..1510f457b615 100644 --- a/arch/arm64/kernel/cacheinfo.c +++ b/arch/arm64/kernel/cacheinfo.c @@ -87,16 +87,18 @@ int populate_cache_leaves(unsigned int cpu) unsigned int level, idx; enum cache_type type; struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - struct cacheinfo *this_leaf = this_cpu_ci->info_list; + struct cacheinfo *infos = this_cpu_ci->info_list; for (idx = 0, level = 1; level <= this_cpu_ci->num_levels && - idx < this_cpu_ci->num_leaves; idx++, level++) { + idx < this_cpu_ci->num_leaves; level++) { type = get_cache_type(level); if (type == CACHE_TYPE_SEPARATE) { - ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level); - ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level); + if (idx + 1 >= this_cpu_ci->num_leaves) + break; + ci_leaf_init(&infos[idx++], CACHE_TYPE_DATA, level); + ci_leaf_init(&infos[idx++], CACHE_TYPE_INST, level); } else { - ci_leaf_init(this_leaf++, type, 
level); + ci_leaf_init(&infos[idx++], type, level); } } return 0; } diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c index deff21bfa680..b68e1d328d4c 100644 --- a/arch/arm64/kernel/compat_alignment.c +++ b/arch/arm64/kernel/compat_alignment.c @@ -368,6 +368,8 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs) return 1; } + if (!handler) + return 1; type = handler(addr, instr, regs); if (type == TYPE_ERROR || type == TYPE_FAULT) diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 59b5a16bab5d..b3e101a7d04f 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -125,6 +125,8 @@ struct fpsimd_last_state_struct { u64 *svcr; unsigned int sve_vl; unsigned int sme_vl; + enum fp_type *fp_type; + enum fp_type to_save; }; static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state); @@ -330,15 +332,6 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, * The task can execute SVE instructions while in userspace without * trapping to the kernel. * - * When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the - * corresponding Zn), P0-P15 and FFR are encoded in - * task->thread.sve_state, formatted appropriately for vector - * length task->thread.sve_vl or, if SVCR.SM is set, - * task->thread.sme_vl. - * - * task->thread.sve_state must point to a valid buffer at least - * sve_state_size(task) bytes in size. - * * During any syscall, the kernel may optionally clear TIF_SVE and * discard the vector state except for the FPSIMD subset. * @@ -348,7 +341,15 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, * do_sve_acc() to be called, which does some preparation and then * sets TIF_SVE. * - * When stored, FPSIMD registers V0-V31 are encoded in + * During any syscall, the kernel may optionally clear TIF_SVE and + * discard the vector state except for the FPSIMD subset. + * + * The data will be stored in one of two formats: + * + * * FPSIMD only - FP_STATE_FPSIMD: + * + * When only the FPSIMD state is stored, task->thread.fp_type is set to + * FP_STATE_FPSIMD, the FPSIMD registers V0-V31 are encoded in * task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are * logically zero but not stored anywhere; P0-P15 and FFR are not * stored and have unspecified values from userspace's point of @@ -356,7 +357,23 @@ void task_set_vl_onexec(struct task_struct *task, enum vec_type type, * but userspace is discouraged from relying on this. * * task->thread.sve_state does not need to be non-NULL, valid or any - * particular size: it must not be dereferenced. + * particular size: it must not be dereferenced and any data stored + * there should be considered stale and not referenced. + * + * * SVE state - FP_STATE_SVE: + * + * When the full SVE state is stored, task->thread.fp_type is set to + * FP_STATE_SVE and Z0-Z31 (incorporating Vn in bits[127:0] or the + * corresponding Zn), P0-P15 and FFR are encoded in + * task->thread.sve_state, formatted appropriately for vector + * length task->thread.sve_vl or, if SVCR.SM is set, + * task->thread.sme_vl. The storage for the vector registers in + * task->thread.uw.fpsimd_state should be ignored. + * + * task->thread.sve_state must point to a valid buffer at least + * sve_state_size(task) bytes in size. The data stored in + * task->thread.uw.fpsimd_state.vregs should be considered stale + * and not referenced.
* * * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state * irrespective of whether TIF_SVE is clear or set, since these are @@ -404,12 +421,15 @@ static void task_fpsimd_load(void) } } - if (restore_sve_regs) + if (restore_sve_regs) { + WARN_ON_ONCE(current->thread.fp_type != FP_STATE_SVE); sve_load_state(sve_pffr(¤t->thread), ¤t->thread.uw.fpsimd_state.fpsr, restore_ffr); - else + } else { + WARN_ON_ONCE(current->thread.fp_type != FP_STATE_FPSIMD); fpsimd_load_state(¤t->thread.uw.fpsimd_state); + } } /* @@ -419,8 +439,8 @@ static void task_fpsimd_load(void) * last, if KVM is involved this may be the guest VM context rather * than the host thread for the VM pointed to by current. This means * that we must always reference the state storage via last rather - * than via current, other than the TIF_ flags which KVM will - * carefully maintain for us. + * than via current, if we are saving KVM state then it will have + * ensured that the type of registers to save is set in last->to_save. */ static void fpsimd_save(void) { @@ -437,7 +457,8 @@ static void fpsimd_save(void) if (test_thread_flag(TIF_FOREIGN_FPSTATE)) return; - if (test_thread_flag(TIF_SVE)) { + if ((last->to_save == FP_STATE_CURRENT && test_thread_flag(TIF_SVE)) || + last->to_save == FP_STATE_SVE) { save_sve_regs = true; save_ffr = true; vl = last->sve_vl; @@ -474,8 +495,10 @@ static void fpsimd_save(void) sve_save_state((char *)last->sve_state + sve_ffr_offset(vl), &last->st->fpsr, save_ffr); + *last->fp_type = FP_STATE_SVE; } else { fpsimd_save_state(last->st); + *last->fp_type = FP_STATE_FPSIMD; } } @@ -851,8 +874,10 @@ int vec_set_vector_length(struct task_struct *task, enum vec_type type, fpsimd_flush_task_state(task); if (test_and_clear_tsk_thread_flag(task, TIF_SVE) || - thread_sm_enabled(&task->thread)) + thread_sm_enabled(&task->thread)) { sve_to_fpsimd(task); + task->thread.fp_type = FP_STATE_FPSIMD; + } if (system_supports_sme()) { if (type == ARM64_VEC_SME || @@ -1234,8 +1259,10 @@ void fpsimd_release_task(struct task_struct *dead_task) */ void sme_alloc(struct task_struct *task, bool flush) { - if (task->thread.za_state && flush) { - memset(task->thread.za_state, 0, za_state_size(task)); + if (task->thread.za_state) { + if (flush) + memset(task->thread.za_state, 0, + za_state_size(task)); return; } @@ -1383,6 +1410,8 @@ static void sve_init_regs(void) fpsimd_bind_task_to_cpu(); } else { fpsimd_to_sve(current); + current->thread.fp_type = FP_STATE_SVE; + fpsimd_flush_task_state(current); } } @@ -1611,6 +1640,8 @@ void fpsimd_flush_thread(void) current->thread.svcr = 0; } + current->thread.fp_type = FP_STATE_FPSIMD; + put_cpu_fpsimd_context(); kfree(sve_state); kfree(za_state); @@ -1659,6 +1690,8 @@ static void fpsimd_bind_task_to_cpu(void) last->sve_vl = task_get_sve_vl(current); last->sme_vl = task_get_sme_vl(current); last->svcr = ¤t->thread.svcr; + last->fp_type = ¤t->thread.fp_type; + last->to_save = FP_STATE_CURRENT; current->thread.fpsimd_cpu = smp_processor_id(); /* @@ -1682,7 +1715,8 @@ static void fpsimd_bind_task_to_cpu(void) void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, unsigned int sve_vl, void *za_state, - unsigned int sme_vl, u64 *svcr) + unsigned int sme_vl, u64 *svcr, + enum fp_type *type, enum fp_type to_save) { struct fpsimd_last_state_struct *last = this_cpu_ptr(&fpsimd_last_state); @@ -1696,6 +1730,8 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, last->za_state = za_state; last->sve_vl = sve_vl; last->sme_vl = sme_vl; + 
last->fp_type = type; + last->to_save = to_save; } /* diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index c8e86e820272..0a366688eda0 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -400,6 +400,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) clear_tsk_thread_flag(dst, TIF_SME); } + dst->thread.fp_type = FP_STATE_FPSIMD; + /* clear any pending asynchronous tag fault raised by the parent */ clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT); @@ -495,7 +497,7 @@ static void tls_thread_switch(struct task_struct *next) if (is_compat_thread(task_thread_info(next))) write_sysreg(next->thread.uw.tp_value, tpidrro_el0); - else if (!arm64_kernel_unmapped_at_el0()) + else write_sysreg(0, tpidrro_el0); write_sysreg(*task_user_tls(next), tpidr_el0); diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c index 2df5e43ae4d1..4978c466e325 100644 --- a/arch/arm64/kernel/proton-pack.c +++ b/arch/arm64/kernel/proton-pack.c @@ -857,52 +857,90 @@ static unsigned long system_bhb_mitigations; * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any * SCOPE_SYSTEM call will give the right answer. */ -u8 spectre_bhb_loop_affected(int scope) +static bool is_spectre_bhb_safe(int scope) +{ + static const struct midr_range spectre_bhb_safe_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A35), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A53), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A510), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A520), + MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER), + {}, + }; + static bool all_safe = true; + + if (scope != SCOPE_LOCAL_CPU) + return all_safe; + + if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list)) + return true; + + all_safe = false; + + return false; +} + +static u8 spectre_bhb_loop_affected(void) { u8 k = 0; - static u8 max_bhb_k; - if (scope == SCOPE_LOCAL_CPU) { - static const struct midr_range spectre_bhb_k32_list[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), - MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), - MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), - MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), - MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), - {}, - }; - static const struct midr_range spectre_bhb_k24_list[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), - MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), - {}, - }; - static const struct midr_range spectre_bhb_k11_list[] = { - MIDR_ALL_VERSIONS(MIDR_AMPERE1), - {}, - }; - static const struct midr_range spectre_bhb_k8_list[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), - {}, - }; + static const struct midr_range spectre_bhb_k132_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_X3), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2), + {}, + }; + static const struct midr_range spectre_bhb_k38_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A715), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A720), + {}, + }; + static const struct midr_range spectre_bhb_k32_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), + MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), + 
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), + {}, + }; + static const struct midr_range spectre_bhb_k24_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A76AE), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), + MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD), + MIDR_ALL_VERSIONS(MIDR_HISI_HIP09), + {}, + }; + static const struct midr_range spectre_bhb_k11_list[] = { + MIDR_ALL_VERSIONS(MIDR_AMPERE1), + {}, + }; + static const struct midr_range spectre_bhb_k8_list[] = { + MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), + MIDR_ALL_VERSIONS(MIDR_CORTEX_A57), + {}, + }; - if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list)) - k = 32; - else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) - k = 24; - else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list)) - k = 11; - else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) - k = 8; - - max_bhb_k = max(max_bhb_k, k); - } else { - k = max_bhb_k; - } + if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k132_list)) + k = 132; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k38_list)) + k = 38; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list)) + k = 32; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) + k = 24; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list)) + k = 11; + else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list)) + k = 8; return k; } @@ -928,29 +966,13 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void) } } -static bool is_spectre_bhb_fw_affected(int scope) +static bool has_spectre_bhb_fw_mitigation(void) { - static bool system_affected; enum mitigation_state fw_state; bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE; - static const struct midr_range spectre_bhb_firmware_mitigated_list[] = { - MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), - MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), - {}, - }; - bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(), - spectre_bhb_firmware_mitigated_list); - - if (scope != SCOPE_LOCAL_CPU) - return system_affected; fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); - if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) { - system_affected = true; - return true; - } - - return false; + return has_smccc && fw_state == SPECTRE_MITIGATED; } static bool supports_ecbhb(int scope) @@ -966,6 +988,8 @@ static bool supports_ecbhb(int scope) ID_AA64MMFR1_EL1_ECBHB_SHIFT); } +static u8 max_bhb_k; + bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope) { @@ -974,16 +998,23 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, if (supports_csv2p3(scope)) return false; - if (supports_clearbhb(scope)) - return true; + if (is_spectre_bhb_safe(scope)) + return false; - if (spectre_bhb_loop_affected(scope)) - return true; + /* + * At this point the core isn't known to be "safe" so we're going to + * assume it's vulnerable. We still need to update `max_bhb_k` though, + * but only if we aren't mitigating with clearbhb.
+ */ + if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU)) + max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected()); - if (is_spectre_bhb_fw_affected(scope)) - return true; + return true; +} - return false; +u8 get_spectre_bhb_loop_value(void) +{ + return max_bhb_k; } static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) @@ -1006,7 +1037,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) isb(); } -static bool __read_mostly __nospectre_bhb; +bool __read_mostly __nospectre_bhb; static int __init parse_spectre_bhb_param(char *str) { __nospectre_bhb = true; @@ -1017,7 +1048,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param); void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) { bp_hardening_cb_t cpu_cb; - enum mitigation_state fw_state, state = SPECTRE_VULNERABLE; + enum mitigation_state state = SPECTRE_VULNERABLE; struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU)) @@ -1043,7 +1074,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN); state = SPECTRE_MITIGATED; set_bit(BHB_INSN, &system_bhb_mitigations); - } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) { + } else if (spectre_bhb_loop_affected()) { /* * Ensure KVM uses the indirect vector which will have the * branchy-loop added. A57/A72-r0 will already have selected @@ -1056,37 +1087,39 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry) this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP); state = SPECTRE_MITIGATED; set_bit(BHB_LOOP, &system_bhb_mitigations); - } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) { - fw_state = spectre_bhb_get_cpu_fw_mitigation_state(); - if (fw_state == SPECTRE_MITIGATED) { - /* - * Ensure KVM uses one of the spectre bp_hardening - * vectors. The indirect vector doesn't include the EL3 - * call, so needs upgrading to - * HYP_VECTOR_SPECTRE_INDIRECT. - */ - if (!data->slot || data->slot == HYP_VECTOR_INDIRECT) - data->slot += 1; + } else if (has_spectre_bhb_fw_mitigation()) { + /* + * Ensure KVM uses one of the spectre bp_hardening + * vectors. The indirect vector doesn't include the EL3 + * call, so needs upgrading to + * HYP_VECTOR_SPECTRE_INDIRECT. + */ + if (!data->slot || data->slot == HYP_VECTOR_INDIRECT) + data->slot += 1; - this_cpu_set_vectors(EL1_VECTOR_BHB_FW); + this_cpu_set_vectors(EL1_VECTOR_BHB_FW); - /* - * The WA3 call in the vectors supersedes the WA1 call - * made during context-switch. Uninstall any firmware - * bp_hardening callback. - */ - cpu_cb = spectre_v2_get_sw_mitigation_cb(); - if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb) - __this_cpu_write(bp_hardening_data.fn, NULL); + /* + * The WA3 call in the vectors supersedes the WA1 call + * made during context-switch. Uninstall any firmware + * bp_hardening callback. 
+ */ + cpu_cb = spectre_v2_get_sw_mitigation_cb(); + if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb) + __this_cpu_write(bp_hardening_data.fn, NULL); - state = SPECTRE_MITIGATED; - set_bit(BHB_FW, &system_bhb_mitigations); - } + state = SPECTRE_MITIGATED; + set_bit(BHB_FW, &system_bhb_mitigations); } update_mitigation_state(&spectre_bhb_state, state); } +bool is_spectre_bhb_fw_mitigated(void) +{ + return test_bit(BHB_FW, &system_bhb_mitigations); +} + /* Patched to NOP when enabled */ void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, __le32 *origptr, @@ -1115,7 +1148,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt, { u8 rd; u32 insn; - u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM); BUG_ON(nr_inst != 1); /* MOV -> MOV */ @@ -1124,7 +1156,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt, insn = le32_to_cpu(*origptr); rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn); - insn = aarch64_insn_gen_movewide(rd, loop_count, 0, + insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0, AARCH64_INSN_VARIANT_64BIT, AARCH64_INSN_MOVEWIDE_ZERO); *updptr++ = cpu_to_le32(insn); diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index d02dd2be17b3..2f1f86d91612 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -917,6 +917,7 @@ static int sve_set_common(struct task_struct *target, clear_tsk_thread_flag(target, TIF_SVE); if (type == ARM64_VEC_SME) fpsimd_force_sync_to_sve(target); + target->thread.fp_type = FP_STATE_FPSIMD; goto out; } @@ -939,6 +940,7 @@ static int sve_set_common(struct task_struct *target, if (!target->thread.sve_state) { ret = -ENOMEM; clear_tsk_thread_flag(target, TIF_SVE); + target->thread.fp_type = FP_STATE_FPSIMD; goto out; } @@ -952,6 +954,7 @@ static int sve_set_common(struct task_struct *target, fpsimd_sync_to_sve(target); if (type == ARM64_VEC_SVE) set_tsk_thread_flag(target, TIF_SVE); + target->thread.fp_type = FP_STATE_SVE; BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header)); start = SVE_PT_SVE_OFFSET; @@ -1340,7 +1343,7 @@ static int tagged_addr_ctrl_get(struct task_struct *target, { long ctrl = get_tagged_addr_ctrl(target); - if (IS_ERR_VALUE(ctrl)) + if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl))) return ctrl; return membuf_write(&to, &ctrl, sizeof(ctrl)); @@ -1354,6 +1357,10 @@ static int tagged_addr_ctrl_set(struct task_struct *target, const struct int ret; long ctrl; + ctrl = get_tagged_addr_ctrl(target); + if (WARN_ON_ONCE(IS_ERR_VALUE(ctrl))) + return ctrl; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, 0, -1); if (ret) return ret; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 82f4572c8ddf..2461bbffe7d4 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -207,6 +207,7 @@ static int restore_fpsimd_context(struct fpsimd_context __user *ctx) __get_user_error(fpsimd.fpcr, &ctx->fpcr, err); clear_thread_flag(TIF_SVE); + current->thread.fp_type = FP_STATE_FPSIMD; /* load the hardware registers from the fpsimd_state structure */ if (!err) @@ -297,6 +298,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) if (sve.head.size <= sizeof(*user->sve)) { clear_thread_flag(TIF_SVE); current->thread.svcr &= ~SVCR_SM_MASK; + current->thread.fp_type = FP_STATE_FPSIMD; goto fpsimd_only; } @@ -332,6 +334,7 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user) current->thread.svcr |= SVCR_SM_MASK; else set_thread_flag(TIF_SVE); + current->thread.fp_type = FP_STATE_SVE; 
fpsimd_only: /* copy the FP and status/control registers */ @@ -937,9 +940,11 @@ static void setup_return(struct pt_regs *regs, struct k_sigaction *ka, * FPSIMD register state - flush the saved FPSIMD * register state in case it gets loaded. */ - if (current->thread.svcr & SVCR_SM_MASK) + if (current->thread.svcr & SVCR_SM_MASK) { memset(¤t->thread.uw.fpsimd_state, 0, sizeof(current->thread.uw.fpsimd_state)); + current->thread.fp_type = FP_STATE_FPSIMD; + } current->thread.svcr &= ~(SVCR_ZA_MASK | SVCR_SM_MASK); diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S index 487381164ff6..2def9d0dd3dd 100644 --- a/arch/arm64/kernel/smccc-call.S +++ b/arch/arm64/kernel/smccc-call.S @@ -7,48 +7,19 @@ #include #include -#include - -/* - * If we have SMCCC v1.3 and (as is likely) no SVE state in - * the registers then set the SMCCC hint bit to say there's no - * need to preserve it. Do this by directly adjusting the SMCCC - * function value which is already stored in x0 ready to be called. - */ -SYM_FUNC_START(__arm_smccc_sve_check) - - ldr_l x16, smccc_has_sve_hint - cbz x16, 2f - - get_current_task x16 - ldr x16, [x16, #TSK_TI_FLAGS] - tbnz x16, #TIF_FOREIGN_FPSTATE, 1f // Any live FP state? - tbnz x16, #TIF_SVE, 2f // Does that state include SVE? - -1: orr x0, x0, ARM_SMCCC_1_3_SVE_HINT - -2: ret -SYM_FUNC_END(__arm_smccc_sve_check) -EXPORT_SYMBOL(__arm_smccc_sve_check) .macro SMCCC instr - stp x29, x30, [sp, #-16]! - mov x29, sp -alternative_if ARM64_SVE - bl __arm_smccc_sve_check -alternative_else_nop_endif \instr #0 - ldr x4, [sp, #16] + ldr x4, [sp] stp x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS] stp x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS] - ldr x4, [sp, #24] + ldr x4, [sp, #8] cbz x4, 1f /* no quirk structure */ ldr x9, [x4, #ARM_SMCCC_QUIRK_ID_OFFS] cmp x9, #ARM_SMCCC_QUIRK_QCOM_A6 b.ne 1f str x6, [x4, ARM_SMCCC_QUIRK_STATE_OFFS] -1: ldp x29, x30, [sp], #16 - ret +1: ret .endm /* diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S index 6028f1fe2d1c..cc8b36acb996 100644 --- a/arch/arm64/kernel/vdso/vdso.lds.S +++ b/arch/arm64/kernel/vdso/vdso.lds.S @@ -38,6 +38,7 @@ SECTIONS */ /DISCARD/ : { *(.note.GNU-stack .note.gnu.property) + *(.ARM.attributes) } .note : { *(.note.*) } :text :note diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 45131e354e27..a2fb4241f5fb 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -149,6 +149,7 @@ SECTIONS /DISCARD/ : { *(.interp .dynamic) *(.dynsym .dynstr .hash .gnu.hash) + *(.ARM.attributes) } . 
= KIMAGE_VADDR; @@ -272,6 +273,9 @@ SECTIONS __initdata_end = .; __init_end = .; + .data.rel.ro : { *(.data.rel.ro) } + ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!") + _data = .; _sdata = .; RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN) @@ -323,9 +327,6 @@ SECTIONS *(.plt) *(.plt.*) *(.iplt) *(.igot .igot.plt) } ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!") - - .data.rel.ro : { *(.data.rel.ro) } - ASSERT(SIZEOF(.data.rel.ro) == 0, "Unexpected RELRO detected!") } #include "image-vars.h" diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index cf7e10587fb0..6eb992056c67 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -371,7 +371,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) if (err) return err; - return kvm_share_hyp(vcpu, vcpu + 1); + err = kvm_share_hyp(vcpu, vcpu + 1); + if (err) + kvm_vgic_vcpu_destroy(vcpu); + + return err; } void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) @@ -876,7 +880,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) if (run->exit_reason == KVM_EXIT_MMIO) { ret = kvm_handle_mmio_return(vcpu); - if (ret) + if (ret <= 0) return ret; } @@ -1230,7 +1234,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, } vcpu_reset_hcr(vcpu); - vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT; /* * Handle the "start in power-off" case. diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index ec8e4494873d..3fd86b71ee37 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -49,8 +49,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) if (ret) return ret; - vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd); - /* * We need to keep current's task_struct pinned until its data has been * unshared with the hypervisor to make sure it is not re-used by the @@ -75,36 +73,20 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) { BUG_ON(!current->mm); - BUG_ON(test_thread_flag(TIF_SVE)); if (!system_supports_fpsimd()) return; - vcpu->arch.fp_state = FP_STATE_HOST_OWNED; - - vcpu_clear_flag(vcpu, HOST_SVE_ENABLED); - if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) - vcpu_set_flag(vcpu, HOST_SVE_ENABLED); - /* - * We don't currently support SME guests but if we leave - * things in streaming mode then when the guest starts running - * FPSIMD or SVE code it may generate SME traps so as a - * special case if we are in streaming mode we force the host - * state to be saved now and exit streaming mode so that we - * don't have to handle any SME traps for valid guest - * operations. Do this for ZA as well for now for simplicity. + * Ensure that any host FPSIMD/SVE/SME state is saved and unbound such + * that the host kernel is responsible for restoring this state upon + * return to userspace, and the hyp code doesn't need to save anything. + * + * When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures + * that PSTATE.{SM,ZA} == {0,0}. 
*/ - if (system_supports_sme()) { - vcpu_clear_flag(vcpu, HOST_SME_ENABLED); - if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN) - vcpu_set_flag(vcpu, HOST_SME_ENABLED); - - if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) { - vcpu->arch.fp_state = FP_STATE_FREE; - fpsimd_save_and_flush_cpu_state(); - } - } + fpsimd_save_and_flush_cpu_state(); + vcpu->arch.fp_state = FP_STATE_FREE; } /* @@ -129,9 +111,16 @@ void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu) */ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) { + enum fp_type fp_type; + WARN_ON_ONCE(!irqs_disabled()); if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) { + if (vcpu_has_sve(vcpu)) + fp_type = FP_STATE_SVE; + else + fp_type = FP_STATE_FPSIMD; + /* * Currently we do not support SME guests so SVCR is * always 0 and we just need a variable to point to. @@ -139,10 +128,10 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) fpsimd_bind_state_to_cpu(&vcpu->arch.ctxt.fp_regs, vcpu->arch.sve_state, vcpu->arch.sve_max_vl, - NULL, 0, &vcpu->arch.svcr); + NULL, 0, &vcpu->arch.svcr, + &vcpu->arch.fp_type, fp_type); clear_thread_flag(TIF_FOREIGN_FPSTATE); - update_thread_flag(TIF_SVE, vcpu_has_sve(vcpu)); } } @@ -158,48 +147,19 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) local_irq_save(flags); - /* - * If we have VHE then the Hyp code will reset CPACR_EL1 to - * CPACR_EL1_DEFAULT and we need to reenable SME. - */ - if (has_vhe() && system_supports_sme()) { - /* Also restore EL0 state seen on entry */ - if (vcpu_get_flag(vcpu, HOST_SME_ENABLED)) - sysreg_clear_set(CPACR_EL1, 0, - CPACR_EL1_SMEN_EL0EN | - CPACR_EL1_SMEN_EL1EN); - else - sysreg_clear_set(CPACR_EL1, - CPACR_EL1_SMEN_EL0EN, - CPACR_EL1_SMEN_EL1EN); - } - if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) { - if (vcpu_has_sve(vcpu)) { - __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR); - - /* Restore the VL that was saved when bound to the CPU */ - if (!has_vhe()) - sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, - SYS_ZCR_EL1); - } - - fpsimd_save_and_flush_cpu_state(); - } else if (has_vhe() && system_supports_sve()) { /* - * The FPSIMD/SVE state in the CPU has not been touched, and we - * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been - * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE - * for EL0. To avoid spurious traps, restore the trap state - * seen by kvm_arch_vcpu_load_fp(): + * Flush (save and invalidate) the fpsimd/sve state so that if + * the host tries to use fpsimd/sve, it's not using stale data + * from the guest. + * + * Flushing the state sets the TIF_FOREIGN_FPSTATE bit for the + * context unconditionally, in both nVHE and VHE. This allows + * the kernel to restore the fpsimd/sve state, including ZCR_EL1 + * when needed. */ - if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED)) - sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN); - else - sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0); + fpsimd_save_and_flush_cpu_state(); } - update_thread_flag(TIF_SVE, 0); - local_irq_restore(flags); } diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S index 435346ea1504..d8c94c45cb2f 100644 --- a/arch/arm64/kvm/hyp/entry.S +++ b/arch/arm64/kvm/hyp/entry.S @@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN alternative_else_nop_endif mrs x1, isr_el1 cbz x1, 1f + + // Ensure that __guest_enter() always provides a context // synchronization event so that callers don't need ISBs for anything // that would usually be synchronized by the ERET.
+ isb mov x0, #ARM_EXCEPTION_IRQ ret diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 081aca8f432e..275176e61d74 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -167,13 +167,68 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR); } +static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu) +{ + u64 zcr_el1, zcr_el2; + + if (!guest_owns_fp_regs(vcpu)) + return; + + if (vcpu_has_sve(vcpu)) { + zcr_el2 = vcpu_sve_max_vq(vcpu) - 1; + + write_sysreg_el2(zcr_el2, SYS_ZCR); + + zcr_el1 = __vcpu_sys_reg(vcpu, ZCR_EL1); + write_sysreg_el1(zcr_el1, SYS_ZCR); + } +} + +static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu) +{ + u64 zcr_el1, zcr_el2; + + if (!guest_owns_fp_regs(vcpu)) + return; + + /* + * When the guest owns the FP regs, we know that guest+hyp traps for + * any FPSIMD/SVE/SME features exposed to the guest have been disabled + * by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd() + * prior to __guest_entry(). As __guest_entry() guarantees a context + * synchronization event, we don't need an ISB here to avoid taking + * traps for anything that was exposed to the guest. + */ + if (vcpu_has_sve(vcpu)) { + zcr_el1 = read_sysreg_el1(SYS_ZCR); + __vcpu_sys_reg(vcpu, ZCR_EL1) = zcr_el1; + + /* + * The guest's state is always saved using the guest's max VL. + * Ensure that the host has the guest's max VL active such that + * the host can save the guest's state lazily, but don't + * artificially restrict the host to the guest's max VL. + */ + if (has_vhe()) { + zcr_el2 = vcpu_sve_max_vq(vcpu) - 1; + write_sysreg_el2(zcr_el2, SYS_ZCR); + } else { + zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1; + write_sysreg_el2(zcr_el2, SYS_ZCR); + + zcr_el1 = vcpu_sve_max_vq(vcpu) - 1; + write_sysreg_el1(zcr_el1, SYS_ZCR); + } + } +} + /* * We trap the first access to the FP/SIMD to save the host context and * restore the guest context lazily. * If FP/SIMD is not implemented, handle the trap and inject an undefined * instruction exception to the guest. Similarly for trapped SVE accesses. 
*/ -static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) { bool sve_guest; u8 esr_ec; @@ -207,10 +262,6 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) } isb(); - /* Write out the host state if it's in the registers */ - if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED) - __fpsimd_save_state(vcpu->arch.host_fpsimd_state); - /* Restore the guest state */ if (sve_guest) __hyp_sve_restore_guest(vcpu); @@ -335,7 +386,7 @@ static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code) return true; } -static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) { if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) && handle_tx2_tvm(vcpu)) @@ -351,7 +402,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code) return false; } -static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code) { if (static_branch_unlikely(&vgic_v3_cpuif_trap) && __vgic_v3_perform_cpuif_access(vcpu) == 1) @@ -360,19 +411,18 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code) return false; } -static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, + u64 *exit_code) { if (!__populate_fault_info(vcpu)) return true; return false; } -static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) - __alias(kvm_hyp_handle_memory_fault); -static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code) - __alias(kvm_hyp_handle_memory_fault); +#define kvm_hyp_handle_iabt_low kvm_hyp_handle_memory_fault +#define kvm_hyp_handle_watchpt_low kvm_hyp_handle_memory_fault -static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) { if (kvm_hyp_handle_memory_fault(vcpu, exit_code)) return true; @@ -402,23 +452,16 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code) typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *); -static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu); - -static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code); - /* * Allow the hypervisor to handle the exit with an exit handler if it has one. * * Returns true if the hypervisor handled the exit, and control should go back * to the guest, or false if it hasn't. */ -static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code, + const exit_handler_fn *handlers) { - const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu); - exit_handler_fn fn; - - fn = handlers[kvm_vcpu_trap_get_class(vcpu)]; - + exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)]; if (fn) return fn(vcpu, exit_code); @@ -448,20 +491,9 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code * the guest, false when we should restore the host state and return to the * main run loop. 
*/ -static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code, + const exit_handler_fn *handlers) { - /* - * Save PSTATE early so that we can evaluate the vcpu mode - * early on. - */ - synchronize_vcpu_pstate(vcpu, exit_code); - - /* - * Check whether we want to repaint the state one way or - * another. - */ - early_exit_filter(vcpu, exit_code); - if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR); @@ -491,7 +523,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) goto exit; /* Check if there's an exit handler and allow it to handle the exit. */ - if (kvm_hyp_handle_exit(vcpu, exit_code)) + if (kvm_hyp_handle_exit(vcpu, exit_code, handlers)) goto guest; exit: /* Return to the host kernel and handle the exit */ diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 3cea4b6ac23e..b183cc866404 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -5,6 +5,7 @@ */ #include +#include #include #include @@ -25,7 +26,9 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt) { DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1); + fpsimd_lazy_switch_to_guest(kern_hyp_va(vcpu)); cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu)); + fpsimd_lazy_switch_to_host(kern_hyp_va(vcpu)); } static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt) @@ -285,11 +288,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt) case ESR_ELx_EC_SMC64: handle_host_smc(host_ctxt); break; - case ESR_ELx_EC_SVE: - sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0); - isb(); - sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); - break; case ESR_ELx_EC_IABT_LOW: case ESR_ELx_EC_DABT_LOW: handle_host_mem_abort(host_ctxt); diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 85d3b7ae720f..6042cdd3d887 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -9,6 +9,8 @@ #include #include +unsigned int kvm_host_sve_max_vl; + /* * Set trap register values based on features in ID_AA64PFR0. */ @@ -17,7 +19,6 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1); u64 hcr_set = HCR_RW; u64 hcr_clear = 0; - u64 cptr_set = 0; /* Protected KVM does not support AArch32 guests. 
*/ BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), @@ -44,16 +45,10 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) /* Trap AMU */ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) { hcr_clear |= HCR_AMVOFFEN; - cptr_set |= CPTR_EL2_TAM; } - /* Trap SVE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) - cptr_set |= CPTR_EL2_TZ; - vcpu->arch.hcr_el2 |= hcr_set; vcpu->arch.hcr_el2 &= ~hcr_clear; - vcpu->arch.cptr_el2 |= cptr_set; } /* @@ -83,7 +78,6 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1); u64 mdcr_set = 0; u64 mdcr_clear = 0; - u64 cptr_set = 0; /* Trap/constrain PMU */ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) { @@ -110,13 +104,8 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids)) mdcr_set |= MDCR_EL2_TTRF; - /* Trap Trace */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) - cptr_set |= CPTR_EL2_TTA; - vcpu->arch.mdcr_el2 |= mdcr_set; vcpu->arch.mdcr_el2 &= ~mdcr_clear; - vcpu->arch.cptr_el2 |= cptr_set; } /* @@ -167,8 +156,6 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu) /* Clear res0 and set res1 bits to trap potential new features. */ vcpu->arch.hcr_el2 &= ~(HCR_RES0); vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0); - vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1; - vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0); } /* diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 895fb3200076..47c7f3a675ae 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -36,23 +36,54 @@ DEFINE_PER_CPU(unsigned long, kvm_hyp_vector); extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc); -static void __activate_traps(struct kvm_vcpu *vcpu) +static void __activate_cptr_traps(struct kvm_vcpu *vcpu) { - u64 val; + u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */ - ___activate_traps(vcpu); - __activate_traps_common(vcpu); - - val = vcpu->arch.cptr_el2; - val |= CPTR_EL2_TTA | CPTR_EL2_TAM; - if (!guest_owns_fp_regs(vcpu)) { - val |= CPTR_EL2_TFP | CPTR_EL2_TZ; + if (!guest_owns_fp_regs(vcpu)) __activate_traps_fpsimd32(vcpu); - } - if (cpus_have_final_cap(ARM64_SME)) + + /* !hVHE case upstream */ + if (1) { + val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1; + + /* + * Always trap SME since it's not supported in KVM. + * TSM is RES1 if SME isn't implemented. 
+ */ val |= CPTR_EL2_TSM; - write_sysreg(val, cptr_el2); + if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs(vcpu)) + val |= CPTR_EL2_TZ; + + if (!guest_owns_fp_regs(vcpu)) + val |= CPTR_EL2_TFP; + + write_sysreg(val, cptr_el2); + } +} + +static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu) +{ + /* !hVHE case upstream */ + if (1) { + u64 val = CPTR_NVHE_EL2_RES1; + + if (!cpus_have_final_cap(ARM64_SVE)) + val |= CPTR_EL2_TZ; + if (!cpus_have_final_cap(ARM64_SME)) + val |= CPTR_EL2_TSM; + + write_sysreg(val, cptr_el2); + } +} + +static void __activate_traps(struct kvm_vcpu *vcpu) +{ + ___activate_traps(vcpu); + __activate_traps_common(vcpu); + __activate_cptr_traps(vcpu); + write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) { @@ -73,7 +104,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu) static void __deactivate_traps(struct kvm_vcpu *vcpu) { extern char __kvm_hyp_host_vector[]; - u64 cptr; ___deactivate_traps(vcpu); @@ -98,13 +128,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); - cptr = CPTR_EL2_DEFAULT; - if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)) - cptr |= CPTR_EL2_TZ; - if (cpus_have_final_cap(ARM64_SME)) - cptr &= ~CPTR_EL2_TSM; - - write_sysreg(cptr, cptr_el2); + __deactivate_cptr_traps(vcpu); write_sysreg(__kvm_hyp_host_vector, vbar_el2); } @@ -209,21 +233,22 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) return hyp_exit_handlers; } -/* - * Some guests (e.g., protected VMs) are not be allowed to run in AArch32. - * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a - * guest from dropping to AArch32 EL0 if implemented by the CPU. If the - * hypervisor spots a guest in such a state ensure it is handled, and don't - * trust the host to spot or fix it. The check below is based on the one in - * kvm_arch_vcpu_ioctl_run(). - * - * Returns false if the guest ran in AArch32 when it shouldn't have, and - * thus should exit to the host, or true if a the guest run loop can continue. - */ -static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) +static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) { + const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu); struct kvm *kvm = kern_hyp_va(vcpu->kvm); + synchronize_vcpu_pstate(vcpu, exit_code); + + /* + * Some guests (e.g., protected VMs) are not allowed to run in + * AArch32. The ARMv8 architecture does not give the hypervisor a + * mechanism to prevent a guest from dropping to AArch32 EL0 if + * implemented by the CPU. If the hypervisor spots a guest in such a + * state ensure it is handled, and don't trust the host to spot or fix + * it. The check below is based on the one in + * kvm_arch_vcpu_ioctl_run().
+ */ if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) { /* * As we have caught the guest red-handed, decide that it isn't @@ -236,6 +261,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT); *exit_code |= ARM_EXCEPTION_IL; } + + return __fixup_guest_exit(vcpu, exit_code, handlers); } /* Switch to the guest for legacy non-VHE systems */ diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 45ac4a59cc2c..179152bb9e42 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -114,13 +114,11 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, }; -static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) +static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) { - return hyp_exit_handlers; -} + synchronize_vcpu_pstate(vcpu, exit_code); -static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) -{ + return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers); } /* Switch to the guest for VHE systems running in EL2 */ @@ -136,6 +134,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) sysreg_save_host_state_vhe(host_ctxt); + fpsimd_lazy_switch_to_guest(vcpu); + /* * ARM erratum 1165522 requires us to configure both stage 1 and * stage 2 translation for the guest context before we clear @@ -166,6 +166,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) __deactivate_traps(vcpu); + fpsimd_lazy_switch_to_host(vcpu); + sysreg_restore_host_state_vhe(host_ctxt); if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c index 3dd38a151d2a..2aa503ff742e 100644 --- a/arch/arm64/kvm/mmio.c +++ b/arch/arm64/kvm/mmio.c @@ -72,6 +72,31 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len) return data; } +static bool kvm_pending_sync_exception(struct kvm_vcpu *vcpu) +{ + if (!vcpu_get_flag(vcpu, PENDING_EXCEPTION)) + return false; + + if (vcpu_el1_is_32bit(vcpu)) { + switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) { + case unpack_vcpu_flag(EXCEPT_AA32_UND): + case unpack_vcpu_flag(EXCEPT_AA32_IABT): + case unpack_vcpu_flag(EXCEPT_AA32_DABT): + return true; + default: + return false; + } + } else { + switch (vcpu_get_flag(vcpu, EXCEPT_MASK)) { + case unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC): + case unpack_vcpu_flag(EXCEPT_AA64_EL2_SYNC): + return true; + default: + return false; + } + } +} + /** * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation * or in-kernel IO emulation @@ -84,9 +109,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu) unsigned int len; int mask; - /* Detect an already handled MMIO return */ - if (unlikely(!vcpu->mmio_needed)) - return 0; + /* + * Detect if the MMIO return was already handled or if userspace aborted + * the MMIO access. 
+ */ + if (unlikely(!vcpu->mmio_needed || kvm_pending_sync_exception(vcpu))) + return 1; vcpu->mmio_needed = 0; @@ -117,7 +145,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu) */ kvm_incr_pc(vcpu); - return 0; + return 1; } int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index c7e5f6a28c28..9622415e9e7a 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -294,7 +294,6 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) { reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); - reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1); } diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index f9d070473614..54e00ee631a0 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -42,11 +42,14 @@ static u32 kvm_ipa_limit; PSR_AA32_I_BIT | PSR_AA32_F_BIT) unsigned int kvm_sve_max_vl; +unsigned int kvm_host_sve_max_vl; int kvm_arm_init_sve(void) { if (system_supports_sve()) { kvm_sve_max_vl = sve_max_virtualisable_vl(); + kvm_host_sve_max_vl = sve_max_vl(); + kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl; /* * The get_sve_reg()/set_sve_reg() ioctl interface will need diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 092327665a6e..5a8a181cf219 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -855,6 +855,9 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its, ite = find_ite(its, device_id, event_id); if (ite && its_is_collection_mapped(ite->collection)) { + struct its_device *device = find_its_device(its, device_id); + int ite_esz = vgic_its_get_abi(its)->ite_esz; + gpa_t gpa = device->itt_addr + ite->event_id * ite_esz; /* * Though the spec talks about removing the pending state, we * don't bother here since we clear the ITTE anyway and the @@ -863,7 +866,8 @@ static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its, vgic_its_invalidate_cache(kvm); its_free_ite(kvm, ite); - return 0; + + return vgic_its_write_entry_lock(its, gpa, 0, ite_esz); } return E_ITS_DISCARD_UNMAPPED_INTERRUPT; @@ -1211,9 +1215,11 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its, bool valid = its_cmd_get_validbit(its_cmd); u8 num_eventid_bits = its_cmd_get_size(its_cmd); gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd); + int dte_esz = vgic_its_get_abi(its)->dte_esz; struct its_device *device; + gpa_t gpa; - if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL)) + if (!vgic_its_check_id(its, its->baser_device_table, device_id, &gpa)) return E_ITS_MAPD_DEVICE_OOR; if (valid && num_eventid_bits > VITS_TYPER_IDBITS) @@ -1234,7 +1240,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its, * is an error, so we are done in any case. 
*/ if (!valid) - return 0; + return vgic_its_write_entry_lock(its, gpa, 0, dte_esz); device = vgic_its_alloc_device(its, device_id, itt_addr, num_eventid_bits); @@ -2207,7 +2213,6 @@ static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz, static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev, struct its_ite *ite, gpa_t gpa, int ite_esz) { - struct kvm *kvm = its->dev->kvm; u32 next_offset; u64 val; @@ -2216,7 +2221,8 @@ static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev, ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) | ite->collection->collection_id; val = cpu_to_le64(val); - return kvm_write_guest_lock(kvm, gpa, &val, ite_esz); + + return vgic_its_write_entry_lock(its, gpa, val, ite_esz); } /** @@ -2357,7 +2363,6 @@ static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev) static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev, gpa_t ptr, int dte_esz) { - struct kvm *kvm = its->dev->kvm; u64 val, itt_addr_field; u32 next_offset; @@ -2368,7 +2373,8 @@ static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev, (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) | (dev->num_eventid_bits - 1)); val = cpu_to_le64(val); - return kvm_write_guest_lock(kvm, ptr, &val, dte_esz); + + return vgic_its_write_entry_lock(its, ptr, val, dte_esz); } /** @@ -2555,7 +2561,8 @@ static int vgic_its_save_cte(struct vgic_its *its, ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) | collection->collection_id); val = cpu_to_le64(val); - return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz); + + return vgic_its_write_entry_lock(its, gpa, val, esz); } /* @@ -2571,8 +2578,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) u64 val; int ret; - BUG_ON(esz > sizeof(val)); - ret = kvm_read_guest_lock(kvm, gpa, &val, esz); + ret = vgic_its_read_entry_lock(its, gpa, &val, esz); if (ret) return ret; val = le64_to_cpu(val); @@ -2610,7 +2616,6 @@ static int vgic_its_save_collection_table(struct vgic_its *its) u64 baser = its->baser_coll_table; gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser); struct its_collection *collection; - u64 val; size_t max_size, filled = 0; int ret, cte_esz = abi->cte_esz; @@ -2634,10 +2639,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its) * table is not fully filled, add a last dummy element * with valid bit unset */ - val = 0; - BUG_ON(cte_esz > sizeof(val)); - ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz); - return ret; + return vgic_its_write_entry_lock(its, gpa, 0, cte_esz); } /** diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 48e8b60ff1e3..7c0b23415ad9 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -555,6 +555,7 @@ static void vgic_mmio_write_invlpi(struct kvm_vcpu *vcpu, unsigned long val) { struct vgic_irq *irq; + u32 intid; /* * If the guest wrote only to the upper 32bit part of the @@ -566,9 +567,13 @@ static void vgic_mmio_write_invlpi(struct kvm_vcpu *vcpu, if ((addr & 4) || !vgic_lpis_enabled(vcpu)) return; + intid = lower_32_bits(val); + if (intid < VGIC_MIN_LPI) + return; + vgic_set_rdist_busy(vcpu, true); - irq = vgic_get_irq(vcpu->kvm, NULL, lower_32_bits(val)); + irq = vgic_get_irq(vcpu->kvm, NULL, intid); if (irq) { vgic_its_inv_lpi(vcpu->kvm, irq); vgic_put_irq(vcpu->kvm, irq); diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index bc898229167b..056fcee46165 100644 --- 
a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -6,6 +6,7 @@ #define __KVM_ARM_VGIC_NEW_H__ #include +#include #define PRODUCT_ID_KVM 0x4b /* ASCII code K */ #define IMPLEMENTER_ARM 0x43b @@ -131,6 +132,29 @@ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq) return vgic_irq_get_lr_count(irq) > 1; } +static inline int vgic_its_read_entry_lock(struct vgic_its *its, gpa_t eaddr, + u64 *eval, unsigned long esize) +{ + struct kvm *kvm = its->dev->kvm; + + if (KVM_BUG_ON(esize != sizeof(*eval), kvm)) + return -EINVAL; + + return kvm_read_guest_lock(kvm, eaddr, eval, esize); + +} + +static inline int vgic_its_write_entry_lock(struct vgic_its *its, gpa_t eaddr, + u64 eval, unsigned long esize) +{ + struct kvm *kvm = its->dev->kvm; + + if (KVM_BUG_ON(esize != sizeof(eval), kvm)) + return -EINVAL; + + return kvm_write_guest_lock(kvm, eaddr, &eval, esize); +} + /* * This struct provides an intermediate representation of the fields contained * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC diff --git a/arch/arm64/lib/insn.c b/arch/arm64/lib/insn.c index 49e972beeac7..44bb90ee2f41 100644 --- a/arch/arm64/lib/insn.c +++ b/arch/arm64/lib/insn.c @@ -5,6 +5,7 @@ * * Copyright (C) 2014-2016 Zi Shen Lim */ +#include #include #include #include @@ -1630,43 +1631,41 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant, return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm); } +static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type) +{ + switch (type) { + case AARCH64_INSN_MB_SY: + return 0xf; + case AARCH64_INSN_MB_ST: + return 0xe; + case AARCH64_INSN_MB_LD: + return 0xd; + case AARCH64_INSN_MB_ISH: + return 0xb; + case AARCH64_INSN_MB_ISHST: + return 0xa; + case AARCH64_INSN_MB_ISHLD: + return 0x9; + case AARCH64_INSN_MB_NSH: + return 0x7; + case AARCH64_INSN_MB_NSHST: + return 0x6; + case AARCH64_INSN_MB_NSHLD: + return 0x5; + default: + pr_err("%s: unknown barrier type %d\n", __func__, type); + return AARCH64_BREAK_FAULT; + } +} + u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type) { u32 opt; u32 insn; - switch (type) { - case AARCH64_INSN_MB_SY: - opt = 0xf; - break; - case AARCH64_INSN_MB_ST: - opt = 0xe; - break; - case AARCH64_INSN_MB_LD: - opt = 0xd; - break; - case AARCH64_INSN_MB_ISH: - opt = 0xb; - break; - case AARCH64_INSN_MB_ISHST: - opt = 0xa; - break; - case AARCH64_INSN_MB_ISHLD: - opt = 0x9; - break; - case AARCH64_INSN_MB_NSH: - opt = 0x7; - break; - case AARCH64_INSN_MB_NSHST: - opt = 0x6; - break; - case AARCH64_INSN_MB_NSHLD: - opt = 0x5; - break; - default: - pr_err("%s: unknown dmb type %d\n", __func__, type); + opt = __get_barrier_crm_val(type); + if (opt == AARCH64_BREAK_FAULT) return AARCH64_BREAK_FAULT; - } insn = aarch64_insn_get_dmb_value(); insn &= ~GENMASK(11, 8); @@ -1674,3 +1673,18 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type) return insn; } + +u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type) +{ + u32 opt, insn; + + opt = __get_barrier_crm_val(type); + if (opt == AARCH64_BREAK_FAULT) + return AARCH64_BREAK_FAULT; + + insn = aarch64_insn_get_dsb_base_value(); + insn &= ~GENMASK(11, 8); + insn |= (opt << 8); + + return insn; +} diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c index e1e0dca01839..d0d9c99c8a0b 100644 --- a/arch/arm64/mm/context.c +++ b/arch/arm64/mm/context.c @@ -32,9 +32,9 @@ static unsigned long nr_pinned_asids; static unsigned long *pinned_asid_map; #define ASID_MASK (~GENMASK(asid_bits - 1, 0)) -#define ASID_FIRST_VERSION (1UL << 
asid_bits) +#define ASID_FIRST_VERSION (1UL << 16) -#define NUM_USER_ASIDS ASID_FIRST_VERSION +#define NUM_USER_ASIDS (1UL << asid_bits) #define ctxid2asid(asid) ((asid) & ~ASID_MASK) #define asid2ctxid(asid, genid) ((asid) | (genid)) diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 134dcf6bc650..99810310efdd 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -544,6 +544,18 @@ pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, static int __init hugetlbpage_init(void) { + /* + * HugeTLB pages are supported on maximum four page table + * levels (PUD, CONT PMD, PMD, CONT PTE) for a given base + * page size, corresponding to hugetlb_add_hstate() calls + * here. + * + * HUGE_MAX_HSTATE should at least match maximum supported + * HugeTLB page sizes on the platform. Any new addition to + * supported HugeTLB page sizes will also require changing + * HUGE_MAX_HSTATE as well. + */ + BUILD_BUG_ON(HUGE_MAX_HSTATE < 4); if (pud_sect_supported()) hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 6a4f118fb25f..cd14c3c22320 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1209,8 +1209,11 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, pmd_t *pmdp; WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); + /* [start, end] should be within one section */ + WARN_ON_ONCE(end - start > PAGES_PER_SECTION * sizeof(struct page)); - if (!ARM64_KERNEL_USES_PMD_MAPS) + if (!ARM64_KERNEL_USES_PMD_MAPS || + (end - start < PAGES_PER_SECTION * sizeof(struct page))) return vmemmap_populate_basepages(start, end, node, altmap); do { @@ -1575,7 +1578,8 @@ int arch_add_memory(int nid, u64 start, u64 size, __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size); else { - max_pfn = PFN_UP(start + size); + /* Address of hotplugged memory can be smaller */ + max_pfn = max(max_pfn, PFN_UP(start + size)); max_low_pfn = max_pfn; } diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index afb79209d413..3dd23050a6c8 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -7,6 +7,7 @@ #define pr_fmt(fmt) "bpf_jit: " fmt +#include #include #include #include @@ -17,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -653,7 +655,51 @@ static void build_plt(struct jit_ctx *ctx) plt->target = (u64)&dummy_tramp; } -static void build_epilogue(struct jit_ctx *ctx) +/* Clobbers BPF registers 1-4, aka x0-x3 */ +static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx) +{ + const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */ + u8 k = get_spectre_bhb_loop_value(); + + if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) || + cpu_mitigations_off() || __nospectre_bhb || + arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) + return; + + if (capable(CAP_SYS_ADMIN)) + return; + + if (supports_clearbhb(SCOPE_SYSTEM)) { + emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx); + return; + } + + if (k) { + emit_a64_mov_i64(r1, k, ctx); + emit(A64_B(1), ctx); + emit(A64_SUBS_I(true, r1, r1, 1), ctx); + emit(A64_B_(A64_COND_NE, -2), ctx); + emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx); + emit(aarch64_insn_get_isb_value(), ctx); + } + + if (is_spectre_bhb_fw_mitigated()) { + emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR, + ARM_SMCCC_ARCH_WORKAROUND_3), ctx); + switch (arm_smccc_1_1_get_conduit()) { + case SMCCC_CONDUIT_HVC: + emit(aarch64_insn_get_hvc_value(), ctx); + break; + case 
SMCCC_CONDUIT_SMC: + emit(aarch64_insn_get_smc_value(), ctx); + break; + default: + pr_err_once("Firmware mitigation enabled with unknown conduit\n"); + } + } +} + +static void build_epilogue(struct jit_ctx *ctx, bool was_classic) { const u8 r0 = bpf2a64[BPF_REG_0]; const u8 r6 = bpf2a64[BPF_REG_6]; @@ -675,10 +721,13 @@ static void build_epilogue(struct jit_ctx *ctx) emit(A64_POP(r8, r9, A64_SP), ctx); emit(A64_POP(r6, r7, A64_SP), ctx); + if (was_classic) + build_bhb_mitigation(ctx); + /* Restore FP/LR registers */ emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); - /* Set return value */ + /* Move the return value from bpf:r0 (aka x7) to x0 */ emit(A64_MOV(1, A64_R(0), r0), ctx); /* Authenticate lr */ @@ -1527,7 +1576,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) } ctx.epilogue_offset = ctx.idx; - build_epilogue(&ctx); + build_epilogue(&ctx, was_classic); build_plt(&ctx); extable_align = __alignof__(struct exception_table_entry); @@ -1563,7 +1612,7 @@ skip_init_ctx: goto out_off; } - build_epilogue(&ctx); + build_epilogue(&ctx, was_classic); build_plt(&ctx); /* 3. Extra pass to validate JITed code. */ @@ -1757,6 +1806,12 @@ static void restore_args(struct jit_ctx *ctx, int args_off, int nargs) } } +static bool is_struct_ops_tramp(const struct bpf_tramp_links *fentry_links) +{ + return fentry_links->nr_links == 1 && + fentry_links->links[0]->link.type == BPF_LINK_TYPE_STRUCT_OPS; +} + /* Based on the x86's implementation of arch_prepare_bpf_trampoline(). * * bpf prog and function entry before bpf trampoline hooked: @@ -1786,6 +1841,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; bool save_ret; __le32 **branches = NULL; + bool is_struct_ops = is_struct_ops_tramp(fentry); /* trampoline stack layout: * [ parent ip ] @@ -1854,11 +1910,14 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, */ emit_bti(A64_BTI_JC, ctx); - /* frame for parent function */ - emit(A64_PUSH(A64_FP, A64_R(9), A64_SP), ctx); - emit(A64_MOV(1, A64_FP, A64_SP), ctx); + /* x9 is not set for struct_ops */ + if (!is_struct_ops) { + /* frame for parent function */ + emit(A64_PUSH(A64_FP, A64_R(9), A64_SP), ctx); + emit(A64_MOV(1, A64_FP, A64_SP), ctx); + } - /* frame for patched function */ + /* frame for patched function for tracing, or caller for struct_ops */ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); emit(A64_MOV(1, A64_FP, A64_SP), ctx); @@ -1883,7 +1942,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx); if (flags & BPF_TRAMP_F_CALL_ORIG) { - emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); + /* for the first pass, assume the worst case */ + if (!ctx->image) + ctx->idx += 4; + else + emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); emit_call((const u64)__bpf_tramp_enter, ctx); } @@ -1927,7 +1990,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, if (flags & BPF_TRAMP_F_CALL_ORIG) { im->ip_epilogue = ctx->image + ctx->idx; - emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); + /* for the first pass, assume the worst case */ + if (!ctx->image) + ctx->idx += 4; + else + emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); emit_call((const u64)__bpf_tramp_exit, ctx); } @@ -1944,19 +2011,24 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, /* reset SP */ emit(A64_MOV(1, A64_SP, A64_FP), ctx); - /* pop frames */ - 
emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); - emit(A64_POP(A64_FP, A64_R(9), A64_SP), ctx); - - if (flags & BPF_TRAMP_F_SKIP_FRAME) { - /* skip patched function, return to parent */ - emit(A64_MOV(1, A64_LR, A64_R(9)), ctx); - emit(A64_RET(A64_R(9)), ctx); + if (is_struct_ops) { + emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); + emit(A64_RET(A64_LR), ctx); } else { - /* return to patched function */ - emit(A64_MOV(1, A64_R(10), A64_LR), ctx); - emit(A64_MOV(1, A64_LR, A64_R(9)), ctx); - emit(A64_RET(A64_R(10)), ctx); + /* pop frames */ + emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); + emit(A64_POP(A64_FP, A64_R(9), A64_SP), ctx); + + if (flags & BPF_TRAMP_F_SKIP_FRAME) { + /* skip patched function, return to parent */ + emit(A64_MOV(1, A64_LR, A64_R(9)), ctx); + emit(A64_RET(A64_R(9)), ctx); + } else { + /* return to patched function */ + emit(A64_MOV(1, A64_R(10), A64_LR), ctx); + emit(A64_MOV(1, A64_LR, A64_R(9)), ctx); + emit(A64_RET(A64_R(10)), ctx); + } } if (ctx->image) diff --git a/arch/hexagon/Makefile b/arch/hexagon/Makefile index 92d005958dfb..ff172cbe5881 100644 --- a/arch/hexagon/Makefile +++ b/arch/hexagon/Makefile @@ -32,3 +32,9 @@ KBUILD_LDFLAGS += $(ldflags-y) TIR_NAME := r19 KBUILD_CFLAGS += -ffixed-$(TIR_NAME) -DTHREADINFO_REG=$(TIR_NAME) -D__linux__ KBUILD_AFLAGS += -DTHREADINFO_REG=$(TIR_NAME) + +# Disable HexagonConstExtenders pass for LLVM versions prior to 19.1.0 +# https://github.com/llvm/llvm-project/issues/99714 +ifneq ($(call clang-min-version, 190100),y) +KBUILD_CFLAGS += -mllvm -hexagon-cext=false +endif diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h index cdb705e1496a..72c6e16c3f23 100644 --- a/arch/hexagon/include/asm/cmpxchg.h +++ b/arch/hexagon/include/asm/cmpxchg.h @@ -56,7 +56,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, __typeof__(ptr) __ptr = (ptr); \ __typeof__(*(ptr)) __old = (old); \ __typeof__(*(ptr)) __new = (new); \ - __typeof__(*(ptr)) __oldval = 0; \ + __typeof__(*(ptr)) __oldval = (__typeof__(*(ptr))) 0; \ \ asm volatile( \ "1: %0 = memw_locked(%1);\n" \ diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index 6447763ce5a9..b7e394cebe20 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c @@ -195,8 +195,10 @@ int die(const char *str, struct pt_regs *regs, long err) printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter); if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) == - NOTIFY_STOP) + NOTIFY_STOP) { + spin_unlock_irq(&die.lock); return 1; + } print_modules(); show_regs(regs); diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index f4ba3638b76a..6c55d85b1c76 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -51,6 +51,7 @@ config LOONGARCH select ARCH_SUPPORTS_NUMA_BALANCING select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT @@ -310,8 +311,8 @@ config CMDLINE_BOOTLOADER config CMDLINE_EXTEND bool "Use built-in to extend bootloader kernel arguments" help - The command-line arguments provided during boot will be - appended to the built-in command line. This is useful in + The built-in command line will be appended to the command- + line arguments provided during boot. This is useful in cases where the provided arguments are insufficient and you don't want to or cannot modify them. 
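The corrected CMDLINE_EXTEND help text above is easy to read backwards, so the short sketch below spells out the documented ordering: the arguments handed over by the bootloader come first, and the built-in CONFIG_CMDLINE string is appended after them. This is a minimal userspace illustration of that ordering only, not the kernel's command-line handling; the helper name and the fixed-size buffer are assumptions made for the example.

#include <stdio.h>

/* Illustrative sketch (not kernel code): CMDLINE_EXTEND as documented above
 * appends the built-in string after the bootloader-provided arguments. */
static void cmdline_extend(char *out, size_t outlen,
			   const char *bootloader_args, const char *builtin)
{
	snprintf(out, outlen, "%s %s", bootloader_args, builtin);
}

int main(void)
{
	char cmdline[256];

	cmdline_extend(cmdline, sizeof(cmdline),
		       "console=ttyS0,115200 root=/dev/sda1",	/* from the bootloader */
		       "earlycon loglevel=7");			/* built-in CONFIG_CMDLINE */
	printf("%s\n", cmdline);
	/* prints: console=ttyS0,115200 root=/dev/sda1 earlycon loglevel=7 */
	return 0;
}

The output keeps the bootloader arguments intact and tacks the built-in string on at the end, which is what the updated help text now states.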
diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile index ed47a3a87768..9d4a6ab7bad9 100644 --- a/arch/loongarch/Makefile +++ b/arch/loongarch/Makefile @@ -38,11 +38,11 @@ endif ifdef CONFIG_64BIT ld-emul = $(64bit-emul) -cflags-y += -mabi=lp64s +cflags-y += -mabi=lp64s -mcmodel=normal endif -cflags-y += -G0 -pipe -msoft-float -LDFLAGS_vmlinux += -G0 -static -n -nostdlib +cflags-y += -pipe -msoft-float +LDFLAGS_vmlinux += -static -n -nostdlib # When the assembler supports explicit relocation hint, we must use it. # GCC may have -mexplicit-relocs off by default if it was built with an old @@ -51,13 +51,18 @@ LDFLAGS_vmlinux += -G0 -static -n -nostdlib # When the assembler does not supports explicit relocation hint, we can't use # it. Disable it if the compiler supports it. # -# If you've seen "unknown reloc hint" message building the kernel and you are -# now wondering why "-mexplicit-relocs" is not wrapped with cc-option: the -# combination of a "new" assembler and "old" compiler is not supported. Either -# upgrade the compiler or downgrade the assembler. +# The combination of a "new" assembler and "old" GCC is not supported, given +# the rarity of this combo and the extra complexity needed to make it work. +# Either upgrade the compiler or downgrade the assembler; the build will error +# out if it is the case (by probing for the model attribute; all supported +# compilers in this case would have support). +# +# Also, -mdirect-extern-access is useful in case of building with explicit +# relocs, for avoiding unnecessary GOT accesses. It is harmless to not have +# support though. ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS -cflags-y += -mexplicit-relocs -KBUILD_CFLAGS_KERNEL += -mdirect-extern-access +cflags-y += $(call cc-option,-mexplicit-relocs) +KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access) else cflags-y += $(call cc-option,-mno-explicit-relocs) KBUILD_AFLAGS_KERNEL += -Wa,-mla-global-with-pcrel diff --git a/arch/loongarch/include/asm/cache.h b/arch/loongarch/include/asm/cache.h index 1b6d09617199..aa622c754414 100644 --- a/arch/loongarch/include/asm/cache.h +++ b/arch/loongarch/include/asm/cache.h @@ -8,6 +8,8 @@ #define L1_CACHE_SHIFT CONFIG_L1_CACHE_SHIFT #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) +#define ARCH_DMA_MINALIGN (16) + #define __read_mostly __section(".data..read_mostly") #endif /* _ASM_CACHE_H */ diff --git a/arch/loongarch/include/asm/hugetlb.h b/arch/loongarch/include/asm/hugetlb.h index aa44b3fe43dd..427b487fbfd6 100644 --- a/arch/loongarch/include/asm/hugetlb.h +++ b/arch/loongarch/include/asm/hugetlb.h @@ -29,6 +29,16 @@ static inline int prepare_hugepage_range(struct file *file, return 0; } +#define __HAVE_ARCH_HUGE_PTE_CLEAR +static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) +{ + pte_t clear; + + pte_val(clear) = (unsigned long)invalid_pte_table; + set_pte_at(mm, addr, ptep, clear); +} + #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) diff --git a/arch/loongarch/include/asm/page.h b/arch/loongarch/include/asm/page.h index bbac81dd7378..9919253804e6 100644 --- a/arch/loongarch/include/asm/page.h +++ b/arch/loongarch/include/asm/page.h @@ -102,10 +102,7 @@ static inline int pfn_valid(unsigned long pfn) extern int __virt_addr_valid(volatile void *kaddr); #define virt_addr_valid(kaddr) __virt_addr_valid((volatile void *)(kaddr)) -#define VM_DATA_DEFAULT_FLAGS \ - (VM_READ | VM_WRITE | \ - 
((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC #include #include diff --git a/arch/loongarch/include/asm/percpu.h b/arch/loongarch/include/asm/percpu.h index c90c56094168..7e804140500f 100644 --- a/arch/loongarch/include/asm/percpu.h +++ b/arch/loongarch/include/asm/percpu.h @@ -14,7 +14,11 @@ * loaded. Tell the compiler this fact when using explicit relocs. */ #if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS) -#define PER_CPU_ATTRIBUTES __attribute__((model("extreme"))) +# if __has_attribute(model) +# define PER_CPU_ATTRIBUTES __attribute__((model("extreme"))) +# else +# error compiler support for the model attribute is necessary when a recent assembler is used +# endif #endif /* Use r21 for fast access */ diff --git a/arch/loongarch/include/asm/ptrace.h b/arch/loongarch/include/asm/ptrace.h index 59c4608de91d..af86241ae08f 100644 --- a/arch/loongarch/include/asm/ptrace.h +++ b/arch/loongarch/include/asm/ptrace.h @@ -32,9 +32,9 @@ struct pt_regs { unsigned long __last[]; } __aligned(8); -static inline int regs_irqs_disabled(struct pt_regs *regs) +static __always_inline bool regs_irqs_disabled(struct pt_regs *regs) { - return arch_irqs_disabled_flags(regs->csr_prmd); + return !(regs->csr_prmd & CSR_PRMD_PIE); } static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) @@ -54,7 +54,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v /* Query offset/name of register from its name/offset */ extern int regs_query_register_offset(const char *name); -#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) +#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long)) /** * regs_get_register() - get register value from its offset diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 8319cc409009..4dd55a50d89e 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -173,18 +173,6 @@ static __init int setup_node(int pxm) return acpi_map_pxm_to_node(pxm); } -/* - * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for - * I/O localities since SRAT does not list them. I/O localities are - * not supported at this point. - */ -unsigned int numa_distance_cnt; - -static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit) -{ - return slit->locality_count; -} - void __init numa_set_distance(int from, int to, int distance) { if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) { diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c index 2dcb9e003657..30aa420610a0 100644 --- a/arch/loongarch/kernel/machine_kexec.c +++ b/arch/loongarch/kernel/machine_kexec.c @@ -126,14 +126,14 @@ void kexec_reboot(void) /* All secondary cpus go to kexec_smp_wait */ if (smp_processor_id() > 0) { relocated_kexec_smp_wait(NULL); - unreachable(); + BUG(); } #endif do_kexec = (void *)reboot_code_buffer; do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry); - unreachable(); + BUG(); } diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c index ba138117b124..70b4a51885c2 100644 --- a/arch/loongarch/mm/hugetlbpage.c +++ b/arch/loongarch/mm/hugetlbpage.c @@ -47,7 +47,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, pmd = pmd_offset(pud, addr); } } - return (pte_t *) pmd; + return pmd_none(*pmd) ? 
NULL : (pte_t *) pmd; } /* diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index f42a3be5f28d..3861ae6942a0 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -89,9 +89,6 @@ void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; -#ifdef CONFIG_ZONE_DMA - max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; -#endif #ifdef CONFIG_ZONE_DMA32 max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; #endif diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c index eeb2d815cfa2..faf44cac403b 100644 --- a/arch/loongarch/mm/tlb.c +++ b/arch/loongarch/mm/tlb.c @@ -279,7 +279,7 @@ void setup_tlb_handler(int cpu) /* Avoid lockdep warning */ rcu_cpu_starting(cpu); -#ifdef CONFIG_NUMA +#if defined(CONFIG_NUMA) && !defined(CONFIG_PREEMPT_RT) vec_sz = sizeof(exception_handlers); if (pcpu_handlers[cpu]) diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index 4e86441e6319..93e9110aafa1 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -179,7 +179,7 @@ static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call) if (!is_tail_call) { /* Set return value */ - move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]); + emit_insn(ctx, addiw, LOONGARCH_GPR_A0, regmap[BPF_REG_0], 0); /* Return to the caller */ emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0); } else { @@ -808,7 +808,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext { const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm; - move_imm(ctx, dst, imm64, is32); + if (bpf_pseudo_func(insn)) + move_addr(ctx, dst, imm64); + else + move_imm(ctx, dst, imm64, is32); return 1; } diff --git a/arch/loongarch/vdso/Makefile b/arch/loongarch/vdso/Makefile index d89e2ac75f7b..ed196e42972c 100644 --- a/arch/loongarch/vdso/Makefile +++ b/arch/loongarch/vdso/Makefile @@ -23,7 +23,7 @@ endif cflags-vdso := $(ccflags-vdso) \ -isystem $(shell $(CC) -print-file-name=include) \ $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \ - -O2 -g -fno-strict-aliasing -fno-common -fno-builtin -G0 \ + -std=gnu11 -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \ -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \ $(call cc-option, -fno-asynchronous-unwind-tables) \ $(call cc-option, -fno-stack-protector) diff --git a/arch/m68k/coldfire/device.c b/arch/m68k/coldfire/device.c index 7dab46728aed..b6958ec2a220 100644 --- a/arch/m68k/coldfire/device.c +++ b/arch/m68k/coldfire/device.c @@ -93,7 +93,7 @@ static struct platform_device mcf_uart = { .dev.platform_data = mcf_uart_platform_data, }; -#if IS_ENABLED(CONFIG_FEC) +#ifdef MCFFEC_BASE0 #ifdef CONFIG_M5441x #define FEC_NAME "enet-fec" @@ -145,6 +145,7 @@ static struct platform_device mcf_fec0 = { .platform_data = FEC_PDATA, } }; +#endif /* MCFFEC_BASE0 */ #ifdef MCFFEC_BASE1 static struct resource mcf_fec1_resources[] = { @@ -182,7 +183,6 @@ static struct platform_device mcf_fec1 = { } }; #endif /* MCFFEC_BASE1 */ -#endif /* CONFIG_FEC */ #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) /* @@ -624,12 +624,12 @@ static struct platform_device mcf_flexcan0 = { static struct platform_device *mcf_devices[] __initdata = { &mcf_uart, -#if IS_ENABLED(CONFIG_FEC) +#ifdef MCFFEC_BASE0 &mcf_fec0, +#endif #ifdef MCFFEC_BASE1 &mcf_fec1, #endif -#endif #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) &mcf_qspi, #endif diff --git a/arch/m68k/include/asm/mcfgpio.h b/arch/m68k/include/asm/mcfgpio.h index 27f32cc81da6..02049568198c 100644 --- a/arch/m68k/include/asm/mcfgpio.h +++ 
b/arch/m68k/include/asm/mcfgpio.h @@ -144,7 +144,7 @@ static inline void gpio_free(unsigned gpio) * read-modify-write as well as those controlled by the EPORT and GPIO modules. */ #define MCFGPIO_SCR_START 40 -#elif defined(CONFIGM5441x) +#elif defined(CONFIG_M5441x) /* The m5441x EPORT doesn't have its own GPIO port, uses PORT C */ #define MCFGPIO_SCR_START 0 #else diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h index e28eb1c0e0bf..dbf88059e47a 100644 --- a/arch/m68k/include/asm/mvme147hw.h +++ b/arch/m68k/include/asm/mvme147hw.h @@ -93,8 +93,8 @@ struct pcc_regs { #define M147_SCC_B_ADDR 0xfffe3000 #define M147_SCC_PCLK 5000000 -#define MVME147_IRQ_SCSI_PORT (IRQ_USER+0x45) -#define MVME147_IRQ_SCSI_DMA (IRQ_USER+0x46) +#define MVME147_IRQ_SCSI_PORT (IRQ_USER + 5) +#define MVME147_IRQ_SCSI_DMA (IRQ_USER + 6) /* SCC interrupts, for MVME147 */ diff --git a/arch/m68k/include/asm/vga.h b/arch/m68k/include/asm/vga.h index 4742e6bc3ab8..cdd414fa8710 100644 --- a/arch/m68k/include/asm/vga.h +++ b/arch/m68k/include/asm/vga.h @@ -9,7 +9,7 @@ */ #ifndef CONFIG_PCI -#include +#include #include /* @@ -29,9 +29,9 @@ #define inw_p(port) 0 #define outb_p(port, val) do { } while (0) #define outw(port, val) do { } while (0) -#define readb raw_inb -#define writeb raw_outb -#define writew raw_outw +#define readb __raw_readb +#define writeb __raw_writeb +#define writew __raw_writew #endif /* CONFIG_PCI */ #endif /* _ASM_M68K_VGA_H */ diff --git a/arch/m68k/kernel/early_printk.c b/arch/m68k/kernel/early_printk.c index 7d3fe08a48eb..f11ef9f1f56f 100644 --- a/arch/m68k/kernel/early_printk.c +++ b/arch/m68k/kernel/early_printk.c @@ -12,8 +12,9 @@ #include #include -extern void mvme16x_cons_write(struct console *co, - const char *str, unsigned count); + +#include "../mvme147/mvme147.h" +#include "../mvme16x/mvme16x.h" asmlinkage void __init debug_cons_nputs(const char *s, unsigned n); @@ -22,7 +23,9 @@ static void __ref debug_cons_write(struct console *c, { #if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \ defined(CONFIG_COLDFIRE)) - if (MACH_IS_MVME16x) + if (MACH_IS_MVME147) + mvme147_scc_write(c, s, n); + else if (MACH_IS_MVME16x) mvme16x_cons_write(c, s, n); else debug_cons_nputs(s, n); diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c index 4e6218115f43..95d4a7e13b33 100644 --- a/arch/m68k/mvme147/config.c +++ b/arch/m68k/mvme147/config.c @@ -35,6 +35,7 @@ #include #include +#include "mvme147.h" static void mvme147_get_model(char *model); extern void mvme147_sched_init(void); @@ -188,3 +189,32 @@ int mvme147_hwclk(int op, struct rtc_time *t) } return 0; } + +static void scc_delay(void) +{ + __asm__ __volatile__ ("nop; nop;"); +} + +static void scc_write(char ch) +{ + do { + scc_delay(); + } while (!(in_8(M147_SCC_A_ADDR) & BIT(2))); + scc_delay(); + out_8(M147_SCC_A_ADDR, 8); + scc_delay(); + out_8(M147_SCC_A_ADDR, ch); +} + +void mvme147_scc_write(struct console *co, const char *str, unsigned int count) +{ + unsigned long flags; + + local_irq_save(flags); + while (count--) { + if (*str == '\n') + scc_write('\r'); + scc_write(*str++); + } + local_irq_restore(flags); +} diff --git a/arch/m68k/mvme147/mvme147.h b/arch/m68k/mvme147/mvme147.h new file mode 100644 index 000000000000..140bc98b0102 --- /dev/null +++ b/arch/m68k/mvme147/mvme147.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +struct console; + +/* config.c */ +void mvme147_scc_write(struct console *co, const char *str, unsigned int count); diff --git 
a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c index f00c7aa058de..2b7eac224138 100644 --- a/arch/m68k/mvme16x/config.c +++ b/arch/m68k/mvme16x/config.c @@ -38,6 +38,8 @@ #include #include +#include "mvme16x.h" + extern t_bdid mvme_bdid; static MK48T08ptr_t volatile rtc = (MK48T08ptr_t)MVME_RTC_BASE; diff --git a/arch/m68k/mvme16x/mvme16x.h b/arch/m68k/mvme16x/mvme16x.h new file mode 100644 index 000000000000..159c34b70039 --- /dev/null +++ b/arch/m68k/mvme16x/mvme16x.h @@ -0,0 +1,6 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +struct console; + +/* config.c */ +void mvme16x_cons_write(struct console *co, const char *str, unsigned count); diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c index c892e173ec99..a8553f54152b 100644 --- a/arch/microblaze/kernel/microblaze_ksyms.c +++ b/arch/microblaze/kernel/microblaze_ksyms.c @@ -16,6 +16,7 @@ #include #include #include +#include #ifdef CONFIG_FUNCTION_TRACER extern void _mcount(void); @@ -46,3 +47,12 @@ extern void __udivsi3(void); EXPORT_SYMBOL(__udivsi3); extern void __umodsi3(void); EXPORT_SYMBOL(__umodsi3); + +#ifdef CONFIG_MB_MANAGER +extern void xmb_manager_register(uintptr_t phys_baseaddr, u32 cr_val, + void (*callback)(void *data), + void *priv, void (*reset_callback)(void *data)); +EXPORT_SYMBOL(xmb_manager_register); +extern asmlinkage void xmb_inject_err(void); +EXPORT_SYMBOL(xmb_inject_err); +#endif diff --git a/arch/mips/Makefile b/arch/mips/Makefile index dd6486097e1d..6468f1eb39f3 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -304,7 +304,7 @@ drivers-$(CONFIG_PCI) += arch/mips/pci/ ifdef CONFIG_64BIT ifndef KBUILD_SYM32 ifeq ($(shell expr $(load-y) \< 0xffffffff80000000), 0) - KBUILD_SYM32 = y + KBUILD_SYM32 = $(call cc-option-yn, -msym32) endif endif diff --git a/arch/mips/boot/dts/loongson/loongson64g_4core_ls7a.dts b/arch/mips/boot/dts/loongson/loongson64g_4core_ls7a.dts index c945f8565d54..fb180cb2b8e2 100644 --- a/arch/mips/boot/dts/loongson/loongson64g_4core_ls7a.dts +++ b/arch/mips/boot/dts/loongson/loongson64g_4core_ls7a.dts @@ -33,6 +33,7 @@ compatible = "loongson,pch-msi-1.0"; reg = <0 0x2ff00000 0 0x8>; interrupt-controller; + #interrupt-cells = <1>; msi-controller; loongson,msi-base-vec = <64>; loongson,msi-num-vecs = <192>; diff --git a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi index ed99ee316feb..1292e9ab282a 100644 --- a/arch/mips/boot/dts/loongson/ls7a-pch.dtsi +++ b/arch/mips/boot/dts/loongson/ls7a-pch.dtsi @@ -63,7 +63,6 @@ device_type = "pci"; #address-cells = <3>; #size-cells = <2>; - #interrupt-cells = <2>; msi-parent = <&msi>; reg = <0 0x1a000000 0 0x02000000>, @@ -227,7 +226,7 @@ }; }; - pci_bridge@9,0 { + pcie@9,0 { compatible = "pci0014,7a19.1", "pci0014,7a19", "pciclass060400", @@ -237,12 +236,16 @@ interrupts = <32 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 32 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@a,0 { + pcie@a,0 { compatible = "pci0014,7a09.1", "pci0014,7a09", "pciclass060400", @@ -252,12 +255,16 @@ interrupts = <33 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 33 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@b,0 { + pcie@b,0 { compatible = 
"pci0014,7a09.1", "pci0014,7a09", "pciclass060400", @@ -267,12 +274,16 @@ interrupts = <34 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 34 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@c,0 { + pcie@c,0 { compatible = "pci0014,7a09.1", "pci0014,7a09", "pciclass060400", @@ -282,12 +293,16 @@ interrupts = <35 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 35 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@d,0 { + pcie@d,0 { compatible = "pci0014,7a19.1", "pci0014,7a19", "pciclass060400", @@ -297,12 +312,16 @@ interrupts = <36 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 36 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@e,0 { + pcie@e,0 { compatible = "pci0014,7a09.1", "pci0014,7a09", "pciclass060400", @@ -312,12 +331,16 @@ interrupts = <37 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 37 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@f,0 { + pcie@f,0 { compatible = "pci0014,7a29.1", "pci0014,7a29", "pciclass060400", @@ -327,12 +350,16 @@ interrupts = <40 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 40 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@10,0 { + pcie@10,0 { compatible = "pci0014,7a19.1", "pci0014,7a19", "pciclass060400", @@ -342,12 +369,16 @@ interrupts = <41 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 41 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@11,0 { + pcie@11,0 { compatible = "pci0014,7a29.1", "pci0014,7a29", "pciclass060400", @@ -357,12 +388,16 @@ interrupts = <42 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 42 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@12,0 { + pcie@12,0 { compatible = "pci0014,7a19.1", "pci0014,7a19", "pciclass060400", @@ -372,12 +407,16 @@ interrupts = <43 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 43 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@13,0 { + pcie@13,0 { compatible = "pci0014,7a29.1", "pci0014,7a29", "pciclass060400", @@ -387,12 +426,16 @@ interrupts = <38 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + #address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 38 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; - pci_bridge@14,0 { + pcie@14,0 { compatible = "pci0014,7a19.1", "pci0014,7a19", "pciclass060400", @@ -402,9 +445,13 @@ interrupts = <39 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&pic>; + 
#address-cells = <3>; + #size-cells = <2>; + device_type = "pci"; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; interrupt-map = <0 0 0 0 &pic 39 IRQ_TYPE_LEVEL_HIGH>; + ranges; }; }; diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c index cb12eb211a49..8d74d7d6c05b 100644 --- a/arch/mips/dec/prom/init.c +++ b/arch/mips/dec/prom/init.c @@ -42,7 +42,7 @@ int (*__pmax_close)(int); * Detect which PROM the DECSTATION has, and set the callback vectors * appropriately. */ -void __init which_prom(s32 magic, s32 *prom_vec) +static void __init which_prom(s32 magic, s32 *prom_vec) { /* * No sign of the REX PROM's magic number means we assume a non-REX diff --git a/arch/mips/include/asm/ds1287.h b/arch/mips/include/asm/ds1287.h index 46cfb01f9a14..51cb61fd4c03 100644 --- a/arch/mips/include/asm/ds1287.h +++ b/arch/mips/include/asm/ds1287.h @@ -8,7 +8,7 @@ #define __ASM_DS1287_H extern int ds1287_timer_state(void); -extern void ds1287_set_base_clock(unsigned int clock); +extern int ds1287_set_base_clock(unsigned int hz); extern int ds1287_clockevent_init(int irq); #endif diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h index db497a8167da..e3212f44446f 100644 --- a/arch/mips/include/asm/ftrace.h +++ b/arch/mips/include/asm/ftrace.h @@ -87,4 +87,20 @@ struct dyn_arch_ftrace { #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* __ASSEMBLY__ */ #endif /* CONFIG_FUNCTION_TRACER */ + +#ifdef CONFIG_FTRACE_SYSCALLS +#ifndef __ASSEMBLY__ +/* + * Some syscall entry functions on mips start with "__sys_" (fork and clone, + * for instance). We should also match the sys_ variant with those. + */ +#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME +static inline bool arch_syscall_match_sym_name(const char *sym, + const char *name) +{ + return !strcmp(sym, name) || + (!strncmp(sym, "__sys_", 6) && !strcmp(sym + 6, name + 4)); +} +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_FTRACE_SYSCALLS */ #endif /* _ASM_MIPS_FTRACE_H */ diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index 696b40beb774..8494466740cc 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -47,6 +47,16 @@ extern phys_addr_t __mips_cm_phys_base(void); */ extern int mips_cm_is64; +/* + * mips_cm_is_l2_hci_broken - determine if HCI is broken + * + * Some CM reports show that Hardware Cache Initialization is + * complete, but in reality it's not the case. They also incorrectly + * indicate that Hardware Cache Initialization is supported. This + * flag allows warning about this broken feature. + */ +extern bool mips_cm_is_l2_hci_broken; + /** * mips_cm_error_report - Report CM cache errors */ @@ -85,6 +95,18 @@ static inline bool mips_cm_present(void) #endif } +/** + * mips_cm_update_property - update property from the device tree + * + * Retrieve the properties from the device tree if a CM node exists and + * update the internal variable based on this.
+ */ +#ifdef CONFIG_MIPS_CM +extern void mips_cm_update_property(void); +#else +static inline void mips_cm_update_property(void) {} +#endif + /** * mips_cm_has_l2sync - determine whether an L2-only sync region is present * diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 99eeafe6dcab..c60e72917a28 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -2078,7 +2078,14 @@ do { \ _ASM_INSN_IF_MIPS(0x4200000c) \ _ASM_INSN32_IF_MM(0x0000517c) #else /* !TOOLCHAIN_SUPPORTS_VIRT */ -#define _ASM_SET_VIRT ".set\tvirt\n\t" +#if MIPS_ISA_REV >= 5 +#define _ASM_SET_VIRT_ISA +#elif defined(CONFIG_64BIT) +#define _ASM_SET_VIRT_ISA ".set\tmips64r5\n\t" +#else +#define _ASM_SET_VIRT_ISA ".set\tmips32r5\n\t" +#endif +#define _ASM_SET_VIRT _ASM_SET_VIRT_ISA ".set\tvirt\n\t" #define _ASM_SET_MFGC0 _ASM_SET_VIRT #define _ASM_SET_DMFGC0 _ASM_SET_VIRT #define _ASM_SET_MTGC0 _ASM_SET_VIRT @@ -2099,7 +2106,6 @@ do { \ ({ int __res; \ __asm__ __volatile__( \ ".set\tpush\n\t" \ - ".set\tmips32r5\n\t" \ _ASM_SET_MFGC0 \ "mfgc0\t%0, " #source ", %1\n\t" \ _ASM_UNSET_MFGC0 \ @@ -2113,7 +2119,6 @@ do { \ ({ unsigned long long __res; \ __asm__ __volatile__( \ ".set\tpush\n\t" \ - ".set\tmips64r5\n\t" \ _ASM_SET_DMFGC0 \ "dmfgc0\t%0, " #source ", %1\n\t" \ _ASM_UNSET_DMFGC0 \ @@ -2127,7 +2132,6 @@ do { \ do { \ __asm__ __volatile__( \ ".set\tpush\n\t" \ - ".set\tmips32r5\n\t" \ _ASM_SET_MTGC0 \ "mtgc0\t%z0, " #register ", %1\n\t" \ _ASM_UNSET_MTGC0 \ @@ -2140,7 +2144,6 @@ do { \ do { \ __asm__ __volatile__( \ ".set\tpush\n\t" \ - ".set\tmips64r5\n\t" \ _ASM_SET_DMTGC0 \ "dmtgc0\t%z0, " #register ", %1\n\t" \ _ASM_UNSET_DMTGC0 \ diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index 428b9f1cf1de..841612913f0d 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs, /* Query offset/name of register from its name/offset */ extern int regs_query_register_offset(const char *name); -#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) +#define MAX_REG_OFFSET \ + (offsetof(struct pt_regs, __last) - sizeof(unsigned long)) /** * regs_get_register() - get register value from its offset @@ -155,6 +156,8 @@ static inline long regs_return_value(struct pt_regs *regs) } #define instruction_pointer(regs) ((regs)->cp0_epc) +extern unsigned long exception_ip(struct pt_regs *regs); +#define exception_ip(regs) exception_ip(regs) #define profile_pc(regs) instruction_pointer(regs) extern asmlinkage long syscall_trace_enter(struct pt_regs *regs); diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index a4374b4cb88f..d6ccd5344021 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -97,7 +97,7 @@ do { \ } \ } while (0) #else -# define __sanitize_fcr31(next) +# define __sanitize_fcr31(next) do { (void) (next); } while (0) #endif /* diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c index 9a47fbcd4638..de64d6bb7ba3 100644 --- a/arch/mips/kernel/cevt-ds1287.c +++ b/arch/mips/kernel/cevt-ds1287.c @@ -10,6 +10,7 @@ #include #include +#include #include int ds1287_timer_state(void) diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 8c401e42301c..f39e85fd58fa 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -248,7 +248,7 @@ int ftrace_disable_ftrace_graph_caller(void) #define S_R_SP (0xafb0 << 16) /* 
s{d,w} R, offset(sp) */ #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ -unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long +static unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) { unsigned long sp, ip, tmp; diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index b4f7d950c846..e21c2fd76167 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -5,6 +5,7 @@ */ #include +#include #include #include @@ -14,6 +15,7 @@ void __iomem *mips_gcr_base; void __iomem *mips_cm_l2sync_base; int mips_cm_is64; +bool mips_cm_is_l2_hci_broken; static char *cm2_tr[8] = { "mem", "gcr", "gic", "mmio", @@ -238,6 +240,18 @@ static void mips_cm_probe_l2sync(void) mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE); } +void mips_cm_update_property(void) +{ + struct device_node *cm_node; + + cm_node = of_find_compatible_node(of_root, NULL, "mobileye,eyeq6-cm"); + if (!cm_node) + return; + pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken"); + mips_cm_is_l2_hci_broken = true; + of_node_put(cm_node); +} + int mips_cm_probe(void) { phys_addr_t addr; diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index 9bf60d7d44d3..a7bcf2b814c8 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -56,10 +56,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); /* Indicates online CPUs coupled with the current CPU */ static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); -/* - * Used to synchronize entry to deep idle states. Actually per-core rather - * than per-CPU. - */ +/* Used to synchronize entry to deep idle states */ static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); /* Saved CPU state across the CPS_PM_POWER_GATED state */ @@ -118,9 +115,10 @@ int cps_pm_enter_state(enum cps_pm_state state) cps_nc_entry_fn entry; struct core_boot_config *core_cfg; struct vpe_boot_config *vpe_cfg; + atomic_t *barrier; /* Check that there is an entry function for this state */ - entry = per_cpu(nc_asm_enter, core)[state]; + entry = per_cpu(nc_asm_enter, cpu)[state]; if (!entry) return -EINVAL; @@ -156,7 +154,7 @@ int cps_pm_enter_state(enum cps_pm_state state) smp_mb__after_atomic(); /* Create a non-coherent mapping of the core ready_count */ - core_ready_count = per_cpu(ready_count, core); + core_ready_count = per_cpu(ready_count, cpu); nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), (unsigned long)core_ready_count); nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); @@ -164,7 +162,8 @@ int cps_pm_enter_state(enum cps_pm_state state) /* Ensure ready_count is zero-initialised before the assembly runs */ WRITE_ONCE(*nc_core_ready_count, 0); - coupled_barrier(&per_cpu(pm_barrier, core), online); + barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu])); + coupled_barrier(barrier, online); /* Run the generated entry code */ left = entry(online, nc_core_ready_count); @@ -635,12 +634,14 @@ out_err: static int cps_pm_online_cpu(unsigned int cpu) { - enum cps_pm_state state; - unsigned core = cpu_core(&cpu_data[cpu]); + unsigned int sibling, core; void *entry_fn, *core_rc; + enum cps_pm_state state; + + core = cpu_core(&cpu_data[cpu]); for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { - if (per_cpu(nc_asm_enter, core)[state]) + if (per_cpu(nc_asm_enter, cpu)[state]) continue; if (!test_bit(state, state_support)) continue; @@ -652,16 
+653,19 @@ static int cps_pm_online_cpu(unsigned int cpu) clear_bit(state, state_support); } - per_cpu(nc_asm_enter, core)[state] = entry_fn; + for_each_cpu(sibling, &cpu_sibling_map[cpu]) + per_cpu(nc_asm_enter, sibling)[state] = entry_fn; } - if (!per_cpu(ready_count, core)) { + if (!per_cpu(ready_count, cpu)) { core_rc = kmalloc(sizeof(u32), GFP_KERNEL); if (!core_rc) { pr_err("Failed allocate core %u ready_count\n", core); return -ENOMEM; } - per_cpu(ready_count, core) = core_rc; + + for_each_cpu(sibling, &cpu_sibling_map[cpu]) + per_cpu(ready_count, sibling) = core_rc; } return 0; diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c index a8e569830ec8..fdcaad9fba1d 100644 --- a/arch/mips/kernel/ptrace.c +++ b/arch/mips/kernel/ptrace.c @@ -31,6 +31,7 @@ #include #include +#include #include #include #include @@ -48,6 +49,12 @@ #define CREATE_TRACE_POINTS #include +unsigned long exception_ip(struct pt_regs *regs) +{ + return exception_epc(regs); +} +EXPORT_SYMBOL(exception_ip); + /* * Called by kernel/ptrace.c when detaching.. * diff --git a/arch/mips/loongson64/boardinfo.c b/arch/mips/loongson64/boardinfo.c index 280989c5a137..8bb275c93ac0 100644 --- a/arch/mips/loongson64/boardinfo.c +++ b/arch/mips/loongson64/boardinfo.c @@ -21,13 +21,11 @@ static ssize_t boardinfo_show(struct kobject *kobj, "BIOS Info\n" "Vendor\t\t\t: %s\n" "Version\t\t\t: %s\n" - "ROM Size\t\t: %d KB\n" "Release Date\t\t: %s\n", strsep(&tmp_board_manufacturer, "-"), eboard->name, strsep(&tmp_bios_vendor, "-"), einter->description, - einter->size, especial->special_name); } static struct kobj_attribute boardinfo_attr = __ATTR(boardinfo, 0444, diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 265bc57819df..c89e70df43d8 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -1660,7 +1660,7 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, break; } - case 0x3: + case 0x7: if (MIPSInst_FUNC(ir) != pfetch_op) return SIGILL; diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 3341d4a42199..3a32b49d7ad0 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -18,6 +18,7 @@ config PARISC select ARCH_SUPPORTS_HUGETLBFS if PA20 select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_STACKWALK + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VM_PGTABLE select HAVE_RELIABLE_STACKTRACE select DMA_OPS diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h index e23d06b51a20..91e753f08eaa 100644 --- a/arch/parisc/include/asm/cache.h +++ b/arch/parisc/include/asm/cache.h @@ -20,7 +20,16 @@ #define SMP_CACHE_BYTES L1_CACHE_BYTES -#define ARCH_DMA_MINALIGN L1_CACHE_BYTES +#ifdef CONFIG_PA20 +#define ARCH_DMA_MINALIGN 128 +#else +#define ARCH_DMA_MINALIGN 32 +#endif +#define ARCH_KMALLOC_MINALIGN 16 /* ldcw requires 16-byte alignment */ + +#define arch_slab_minalign() ((unsigned)dcache_stride) +#define cache_line_size() dcache_stride +#define dma_get_cache_alignment cache_line_size #define __read_mostly __section(".data..read_mostly") diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index ac7253891d5e..2e75e9b85318 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c @@ -87,7 +87,7 @@ int ftrace_enable_ftrace_graph_caller(void) int ftrace_disable_ftrace_graph_caller(void) { - static_key_enable(&ftrace_graph_enable.key); + static_key_disable(&ftrace_graph_enable.key); return 0; } #endif diff --git a/arch/parisc/kernel/pdt.c b/arch/parisc/kernel/pdt.c index 
e391b175f5ec..7dbb241a9fb7 100644 --- a/arch/parisc/kernel/pdt.c +++ b/arch/parisc/kernel/pdt.c @@ -62,6 +62,7 @@ static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss; #define PDT_ADDR_PERM_ERR (pdt_type != PDT_PDC ? 2UL : 0UL) #define PDT_ADDR_SINGLE_ERR 1UL +#ifdef CONFIG_PROC_FS /* report PDT entries via /proc/meminfo */ void arch_report_meminfo(struct seq_file *m) { @@ -73,6 +74,7 @@ void arch_report_meminfo(struct seq_file *m) seq_printf(m, "PDT_cur_entries: %7lu\n", pdt_status.pdt_entries); } +#endif static int get_info_pat_new(void) { diff --git a/arch/parisc/math-emu/driver.c b/arch/parisc/math-emu/driver.c index 6ce427b58836..ecd27b48d61f 100644 --- a/arch/parisc/math-emu/driver.c +++ b/arch/parisc/math-emu/driver.c @@ -103,9 +103,19 @@ handle_fpe(struct pt_regs *regs) memcpy(regs->fr, frcopy, sizeof regs->fr); if (signalcode != 0) { - force_sig_fault(signalcode >> 24, signalcode & 0xffffff, - (void __user *) regs->iaoq[0]); - return -1; + int sig = signalcode >> 24; + + if (sig == SIGFPE) { + /* + * Clear floating point trap bit to avoid trapping + * again on the first floating-point instruction in + * the userspace signal handler. + */ + regs->fr[0] &= ~(1ULL << 38); + } + force_sig_fault(sig, signalcode & 0xffffff, + (void __user *) regs->iaoq[0]); + return -1; } return signalcode ? -1 : 0; diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 46ae22cd8bcc..9d7cbc38fbc7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -253,8 +253,8 @@ config PPC select HAVE_RSEQ select HAVE_SETUP_PER_CPU_AREA if PPC64 select HAVE_SOFTIRQ_ON_OWN_STACK - select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2) - select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13) + select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,$(m32-flag) -mstack-protector-guard=tls -mstack-protector-guard-reg=r2 -mstack-protector-guard-offset=0) + select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,$(m64-flag) -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 -mstack-protector-guard-offset=0) select HAVE_STATIC_CALL if PPC32 select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index d7332c6afeaa..3925f35f3af6 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -89,13 +89,6 @@ KBUILD_AFLAGS += -m$(BITS) KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION) endif -cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard=tls -ifdef CONFIG_PPC64 -cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard-reg=r13 -else -cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard-reg=r2 -endif - LDFLAGS_vmlinux-y := -Bstatic LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) += -z notext @@ -383,9 +376,11 @@ prepare: stack_protector_prepare PHONY += stack_protector_prepare stack_protector_prepare: prepare0 ifdef CONFIG_PPC64 - $(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h)) + $(eval KBUILD_CFLAGS += -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 \ + -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h)) else - $(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' include/generated/asm-offsets.h)) + $(eval KBUILD_CFLAGS += 
-mstack-protector-guard=tls -mstack-protector-guard-reg=r2 \ + -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' include/generated/asm-offsets.h)) endif endif diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index 56b876e418e9..6b998cb57255 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig @@ -78,4 +78,4 @@ CONFIG_DEBUG_VM_PGTABLE=y CONFIG_DETECT_HUNG_TASK=y CONFIG_BDI_SWITCH=y CONFIG_PPC_EARLY_DEBUG=y -CONFIG_GENERIC_PTDUMP=y +CONFIG_PTDUMP_DEBUGFS=y diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h index b6ac4f86c87b..433d164374cb 100644 --- a/arch/powerpc/include/asm/book3s/64/hash-4k.h +++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h @@ -89,6 +89,34 @@ static inline int hash__hugepd_ok(hugepd_t hpd) } #endif +/* + * With 4K page size the real_pte machinery is all nops. + */ +static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset) +{ + return (real_pte_t){pte}; +} + +#define __rpte_to_pte(r) ((r).pte) + +static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index) +{ + return pte_val(__rpte_to_pte(rpte)) >> H_PAGE_F_GIX_SHIFT; +} + +#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ + do { \ + index = 0; \ + shift = mmu_psize_defs[psize].shift; \ + +#define pte_iterate_hashed_end() } while(0) + +/* + * We expect this to be called only for user addresses or kernel virtual + * addresses other than the linear mapping. + */ +#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K + /* * 4K PTE format is different from 64K PTE format. Saving the hash_slot is just * a matter of returning the PTE bits that need to be modified. On 64K PTE, diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index c436d8422654..fdbe0b381f3a 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -318,32 +318,6 @@ extern unsigned long pci_io_base; #ifndef __ASSEMBLY__ -/* - * This is the default implementation of various PTE accessors, it's - * used in all cases except Book3S with 64K pages where we have a - * concept of sub-pages - */ -#ifndef __real_pte - -#define __real_pte(e, p, o) ((real_pte_t){(e)}) -#define __rpte_to_pte(r) ((r).pte) -#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT) - -#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \ - do { \ - index = 0; \ - shift = mmu_psize_defs[psize].shift; \ - -#define pte_iterate_hashed_end() } while(0) - -/* - * We expect this to be called only for user addresses or kernel virtual - * addresses other than the linear mapping. 
- */ -#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K - -#endif /* __real_pte */ - static inline unsigned long pte_update(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long clr, unsigned long set, int huge) diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h index ae0a68a838e8..69232231d270 100644 --- a/arch/powerpc/include/asm/cache.h +++ b/arch/powerpc/include/asm/cache.h @@ -33,6 +33,10 @@ #define IFETCH_ALIGN_BYTES (1 << IFETCH_ALIGN_SHIFT) +#ifdef CONFIG_NOT_COHERENT_CACHE +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES +#endif + #if !defined(__ASSEMBLY__) #ifdef CONFIG_PPC64 diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h index 4bcb9f9ac764..859af5de3c2f 100644 --- a/arch/powerpc/include/asm/dtl.h +++ b/arch/powerpc/include/asm/dtl.h @@ -1,8 +1,8 @@ #ifndef _ASM_POWERPC_DTL_H #define _ASM_POWERPC_DTL_H +#include #include -#include /* * Layout of entries in the hypervisor's dispatch trace log buffer. @@ -35,7 +35,7 @@ struct dtl_entry { #define DTL_LOG_ALL (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT) extern struct kmem_cache *dtl_cache; -extern rwlock_t dtl_access_lock; +extern struct rw_semaphore dtl_access_lock; extern void register_dtl_buffer(int cpu); extern void alloc_dtl_buffers(unsigned long *time_limit); diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h index 526a6a647312..daa44b2ef35a 100644 --- a/arch/powerpc/include/asm/fadump.h +++ b/arch/powerpc/include/asm/fadump.h @@ -32,4 +32,11 @@ extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname, int depth, void *data); extern int fadump_reserve_mem(void); #endif + +#if defined(CONFIG_FA_DUMP) && defined(CONFIG_CMA) +void fadump_cma_init(void); +#else +static inline void fadump_cma_init(void) { } +#endif + #endif /* _ASM_POWERPC_FADUMP_H */ diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h index ea71f7245a63..8d8f4909ae1a 100644 --- a/arch/powerpc/include/asm/hugetlb.h +++ b/arch/powerpc/include/asm/hugetlb.h @@ -15,6 +15,15 @@ extern bool hugetlb_disabled; +static inline bool hugepages_supported(void) +{ + if (hugetlb_disabled) + return false; + + return HPAGE_SHIFT != 0; +} +#define hugepages_supported hugepages_supported + void __init hugetlbpage_init_defaultsize(void); int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h index 56f217606327..b9ac9e3a771c 100644 --- a/arch/powerpc/include/asm/page_32.h +++ b/arch/powerpc/include/asm/page_32.h @@ -12,10 +12,6 @@ #define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32 -#ifdef CONFIG_NOT_COHERENT_CACHE -#define ARCH_DMA_MINALIGN L1_CACHE_BYTES -#endif - #if defined(CONFIG_PPC_256K_PAGES) || \ (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)) #define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */ diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h index 50950deedb87..e3d0e714ff28 100644 --- a/arch/powerpc/include/asm/sstep.h +++ b/arch/powerpc/include/asm/sstep.h @@ -173,9 +173,4 @@ int emulate_step(struct pt_regs *regs, ppc_inst_t instr); */ extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op); -extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, - const void *mem, bool cross_endian); -extern void emulate_vsx_store(struct instruction_op *op, - const union vsx_reg *reg, void *mem, - bool cross_endian); extern int 
emulate_dcbz(unsigned long ea, struct pt_regs *regs); diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h index 7650b6ce14c8..8d972bc98b55 100644 --- a/arch/powerpc/include/asm/vdso.h +++ b/arch/powerpc/include/asm/vdso.h @@ -25,6 +25,7 @@ int vdso_getcpu_init(void); #ifdef __VDSO64__ #define V_FUNCTION_BEGIN(name) \ .globl name; \ + .type name,@function; \ name: \ #define V_FUNCTION_END(name) \ diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 3ff2da7b120b..1866bac23400 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -80,27 +80,23 @@ static struct cma *fadump_cma; * But for some reason even if it fails we still have the memory reservation * with us and we can still continue doing fadump. */ -static int __init fadump_cma_init(void) +void __init fadump_cma_init(void) { unsigned long long base, size; int rc; - if (!fw_dump.fadump_enabled) - return 0; - + if (!fw_dump.fadump_supported || !fw_dump.fadump_enabled || + fw_dump.dump_active) + return; /* * Do not use CMA if user has provided fadump=nocma kernel parameter. - * Return 1 to continue with fadump old behaviour. */ - if (fw_dump.nocma) - return 1; + if (fw_dump.nocma || !fw_dump.boot_memory_size) + return; base = fw_dump.reserve_dump_area_start; size = fw_dump.boot_memory_size; - if (!size) - return 0; - rc = cma_init_reserved_mem(base, size, 0, "fadump_cma", &fadump_cma); if (rc) { pr_err("Failed to init cma area for firmware-assisted dump,%d\n", rc); @@ -110,7 +106,7 @@ static int __init fadump_cma_init(void) * blocked from production system usage. Hence return 1, * so that we can continue with fadump. */ - return 1; + return; } /* @@ -127,10 +123,7 @@ static int __init fadump_cma_init(void) cma_get_size(fadump_cma), (unsigned long)cma_get_base(fadump_cma) >> 20, fw_dump.reserve_dump_area_size); - return 1; } -#else -static int __init fadump_cma_init(void) { return 1; } #endif /* CONFIG_CMA */ /* Scan the Firmware Assisted dump configuration details. 
*/ @@ -647,8 +640,6 @@ int __init fadump_reserve_mem(void) pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n", (size >> 20), base, (memblock_phys_mem_size() >> 20)); - - ret = fadump_cma_init(); } return ret; diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index d464ba412084..ac669e58e202 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2924,7 +2924,7 @@ static void __init fixup_device_tree_chrp(void) #endif #if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC) -static void __init fixup_device_tree_pmac(void) +static void __init fixup_device_tree_pmac64(void) { phandle u3, i2c, mpic; u32 u3_rev; @@ -2964,7 +2964,31 @@ static void __init fixup_device_tree_pmac(void) &parent, sizeof(parent)); } #else -#define fixup_device_tree_pmac() +#define fixup_device_tree_pmac64() +#endif + +#ifdef CONFIG_PPC_PMAC +static void __init fixup_device_tree_pmac(void) +{ + __be32 val = 1; + char type[8]; + phandle node; + + // Some pmacs are missing #size-cells on escc or i2s nodes + for (node = 0; prom_next_node(&node); ) { + type[0] = '\0'; + prom_getprop(node, "device_type", type, sizeof(type)); + if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s")) + continue; + + if (prom_getproplen(node, "#size-cells") != PROM_ERROR) + continue; + + prom_setprop(node, NULL, "#size-cells", &val, sizeof(val)); + } +} +#else +static inline void fixup_device_tree_pmac(void) { } #endif #ifdef CONFIG_PPC_EFIKA @@ -3189,6 +3213,7 @@ static void __init fixup_device_tree(void) fixup_device_tree_maple_memory_controller(); fixup_device_tree_chrp(); fixup_device_tree_pmac(); + fixup_device_tree_pmac64(); fixup_device_tree_efika(); fixup_device_tree_pasemi(); } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index f8d3caad4cf3..3c06c8389e05 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -1178,6 +1179,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs) || nargs + nret > ARRAY_SIZE(args.args)) return -EINVAL; + nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args)); + nret = array_index_nospec(nret, ARRAY_SIZE(args.args) - nargs); + /* Copy in args. */ if (copy_from_user(args.args, uargs->args, nargs * sizeof(rtas_arg_t)) != 0) diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 56f6b958926d..41c03c8e41b4 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -982,9 +982,11 @@ void __init setup_arch(char **cmdline_p) initmem_init(); /* - * Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must - * be called after initmem_init(), so that pageblock_order is initialised. + * Reserve large chunks of memory for use by CMA for fadump, KVM and + * hugetlb. These must be called after initmem_init(), so that + * pageblock_order is initialised. 
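The rtas() syscall hunk above clamps nargs and nret with array_index_nospec() after the architectural bounds check, so that a mispredicted bounds branch cannot be used to index args.args out of range under speculation. A minimal user-space sketch of the clamping idea, modeled on the kernel's generic array_index_mask_nospec() mask; the function names here are illustrative, not kernel API:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

/*
 * Produce an all-ones mask when index < size and an all-zeroes mask
 * otherwise, using only arithmetic (no branch for the CPU to mispredict).
 * Relies on arithmetic right shift of negative values, as the kernel's
 * generic helper does.
 */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

int main(void)
{
	unsigned long nelems = 16;	/* stands in for ARRAY_SIZE(args.args) */
	unsigned long ok = 5, bad = 20;

	printf("%lu -> %lu\n", ok, ok & index_mask_nospec(ok, nelems));    /* 5 -> 5  */
	printf("%lu -> %lu\n", bad, bad & index_mask_nospec(bad, nelems)); /* 20 -> 0 */
	return 0;
}

The point is that the mask is computed arithmetically rather than via a comparison branch, so the clamp still holds while the CPU is executing speculatively.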
*/ + fadump_cma_init(); kvm_cma_reserve(); gigantic_hugetlb_cma_reserve(); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 7662265f2433..dbeefbd10171 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -924,6 +924,7 @@ static int __init disable_hardlockup_detector(void) hardlockup_detector_disable(); #else if (firmware_has_feature(FW_FEATURE_LPAR)) { + check_kvm_guest(); if (is_kvm_guest()) hardlockup_detector_disable(); } diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile index a2e7b0ce5b19..ee9fd65dd8cb 100644 --- a/arch/powerpc/kernel/vdso/Makefile +++ b/arch/powerpc/kernel/vdso/Makefile @@ -10,23 +10,11 @@ obj-vdso64 = sigtramp64-64.o gettimeofday-64.o datapage-64.o cacheflush-64.o not ifneq ($(c-gettimeofday-y),) CFLAGS_vgettimeofday-32.o += -include $(c-gettimeofday-y) - CFLAGS_vgettimeofday-32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) - CFLAGS_vgettimeofday-32.o += $(call cc-option, -fno-stack-protector) - CFLAGS_vgettimeofday-32.o += -DDISABLE_BRANCH_PROFILING - CFLAGS_vgettimeofday-32.o += -ffreestanding -fasynchronous-unwind-tables - CFLAGS_REMOVE_vgettimeofday-32.o = $(CC_FLAGS_FTRACE) - CFLAGS_REMOVE_vgettimeofday-32.o += -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc - CFLAGS_vgettimeofday-64.o += -include $(c-gettimeofday-y) - CFLAGS_vgettimeofday-64.o += $(DISABLE_LATENT_ENTROPY_PLUGIN) - CFLAGS_vgettimeofday-64.o += $(call cc-option, -fno-stack-protector) - CFLAGS_vgettimeofday-64.o += -DDISABLE_BRANCH_PROFILING - CFLAGS_vgettimeofday-64.o += -ffreestanding -fasynchronous-unwind-tables - CFLAGS_REMOVE_vgettimeofday-64.o = $(CC_FLAGS_FTRACE) # Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true # by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is # compiler generated. To avoid breaking Go tell GCC not to use r30. Impact on code # generation is minimal, it will just use r29 instead. - CFLAGS_vgettimeofday-64.o += $(call cc-option, -ffixed-r30) + CFLAGS_vgettimeofday-64.o += -include $(c-gettimeofday-y) $(call cc-option, -ffixed-r30) endif # Build rules @@ -47,14 +35,33 @@ KCOV_INSTRUMENT := n UBSAN_SANITIZE := n KASAN_SANITIZE := n -ccflags-y := -shared -fno-common -fno-builtin -nostdlib -Wl,--hash-style=both -ccflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld) +ccflags-y := -fno-common -fno-builtin +ccflags-y += $(DISABLE_LATENT_ENTROPY_PLUGIN) +ccflags-y += $(call cc-option, -fno-stack-protector) +ccflags-y += -DDISABLE_BRANCH_PROFILING +ccflags-y += -ffreestanding -fasynchronous-unwind-tables +ccflags-remove-y := $(CC_FLAGS_FTRACE) +ldflags-y := -Wl,--hash-style=both -nostdlib -shared -z noexecstack $(CLANG_FLAGS) +ldflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld) +# Filter flags that clang will warn are unused for linking +ldflags-y += $(filter-out $(CC_FLAGS_FTRACE) -Wa$(comma)%, $(KBUILD_CFLAGS)) -CC32FLAGS := -Wl,-soname=linux-vdso32.so.1 -m32 -AS32FLAGS := -D__VDSO32__ -s +CC32FLAGS := -m32 +CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc +ifdef CONFIG_CC_IS_CLANG +# This flag is supported by clang for 64-bit but not 32-bit so it will cause +# an unused command line flag warning for this file. +CC32FLAGSREMOVE += -fno-stack-clash-protection +# -mstack-protector-guard values from the 64-bit build are not valid for the +# 32-bit one. 
clang validates the values passed to these arguments during +# parsing, even when -fno-stack-protector is passed afterwards. +CC32FLAGSREMOVE += -mstack-protector-guard% +endif +LD32FLAGS := -Wl,-soname=linux-vdso32.so.1 +AS32FLAGS := -D__VDSO32__ -CC64FLAGS := -Wl,-soname=linux-vdso64.so.1 -AS64FLAGS := -D__VDSO64__ -s +LD64FLAGS := -Wl,-soname=linux-vdso64.so.1 +AS64FLAGS := -D__VDSO64__ targets += vdso32.lds CPPFLAGS_vdso32.lds += -P -C -Upowerpc @@ -92,13 +99,15 @@ include/generated/vdso64-offsets.h: $(obj)/vdso64.so.dbg FORCE # actual build commands quiet_cmd_vdso32ld_and_check = VDSO32L $@ - cmd_vdso32ld_and_check = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) -z noexecstack ; $(cmd_vdso_check) + cmd_vdso32ld_and_check = $(VDSOCC) $(ldflags-y) $(CC32FLAGS) $(LD32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^); $(cmd_vdso_check) quiet_cmd_vdso32as = VDSO32A $@ cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) $(AS32FLAGS) -c -o $@ $< quiet_cmd_vdso32cc = VDSO32C $@ - cmd_vdso32cc = $(VDSOCC) $(c_flags) $(CC32FLAGS) -c -o $@ $< + cmd_vdso32cc = $(VDSOCC) $(filter-out $(CC32FLAGSREMOVE), $(c_flags)) $(CC32FLAGS) -c -o $@ $< quiet_cmd_vdso64ld_and_check = VDSO64L $@ - cmd_vdso64ld_and_check = $(VDSOCC) $(c_flags) $(CC64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) -z noexecstack ; $(cmd_vdso_check) + cmd_vdso64ld_and_check = $(VDSOCC) $(ldflags-y) $(LD64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^); $(cmd_vdso_check) quiet_cmd_vdso64as = VDSO64A $@ - cmd_vdso64as = $(VDSOCC) $(a_flags) $(CC64FLAGS) $(AS64FLAGS) -c -o $@ $< + cmd_vdso64as = $(VDSOCC) $(a_flags) $(AS64FLAGS) -c -o $@ $< + +OBJECT_FILES_NON_STANDARD := y diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c index 180c1dfe4aa7..04d100ca18b8 100644 --- a/arch/powerpc/kexec/file_load_64.c +++ b/arch/powerpc/kexec/file_load_64.c @@ -911,13 +911,18 @@ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code, if (dn) { u64 val; - of_property_read_u64(dn, "opal-base-address", &val); + ret = of_property_read_u64(dn, "opal-base-address", &val); + if (ret) + goto out; + ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val, sizeof(val), false); if (ret) goto out; - of_property_read_u64(dn, "opal-entry-address", &val); + ret = of_property_read_u64(dn, "opal-entry-address", &val); + if (ret) + goto out; ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val, sizeof(val), false); } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 6ba68dd6190b..45708ac55e90 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4062,6 +4062,15 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns } hvregs.hdec_expiry = time_limit; + /* + * hvregs has the doorbell status, so zero it here which + * enables us to receive doorbells when H_ENTER_NESTED is + * in progress for this vCPU + */ + + if (vcpu->arch.doorbell_request) + vcpu->arch.doorbell_request = 0; + /* * When setting DEC, we must always deal with irq_work_raise * via NMI vs setting DEC. 
The problem occurs right as we @@ -4644,7 +4653,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, lpcr |= LPCR_MER; } } else if (vcpu->arch.pending_exceptions || - vcpu->arch.doorbell_request || xive_interrupt_pending(vcpu)) { vcpu->arch.ret = RESUME_HOST; goto out; diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c index 5a64a1341e6f..bc71a90c7cc7 100644 --- a/arch/powerpc/kvm/book3s_hv_nested.c +++ b/arch/powerpc/kvm/book3s_hv_nested.c @@ -32,7 +32,7 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr) struct kvmppc_vcore *vc = vcpu->arch.vcore; hr->pcr = vc->pcr | PCR_MASK; - hr->dpdes = vc->dpdes; + hr->dpdes = vcpu->arch.doorbell_request; hr->hfscr = vcpu->arch.hfscr; hr->tb_offset = vc->tb_offset; hr->dawr0 = vcpu->arch.dawr0; @@ -105,7 +105,7 @@ static void save_hv_return_state(struct kvm_vcpu *vcpu, { struct kvmppc_vcore *vc = vcpu->arch.vcore; - hr->dpdes = vc->dpdes; + hr->dpdes = vcpu->arch.doorbell_request; hr->purr = vcpu->arch.purr; hr->spurr = vcpu->arch.spurr; hr->ic = vcpu->arch.ic; @@ -143,7 +143,7 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state * struct kvmppc_vcore *vc = vcpu->arch.vcore; vc->pcr = hr->pcr | PCR_MASK; - vc->dpdes = hr->dpdes; + vcpu->arch.doorbell_request = hr->dpdes; vcpu->arch.hfscr = hr->hfscr; vcpu->arch.dawr0 = hr->dawr0; vcpu->arch.dawrx0 = hr->dawrx0; @@ -170,7 +170,13 @@ void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu, { struct kvmppc_vcore *vc = vcpu->arch.vcore; - vc->dpdes = hr->dpdes; + /* + * This L2 vCPU might have received a doorbell while H_ENTER_NESTED was being handled. + * Make sure we preserve the doorbell if it was either: + * a) Sent after H_ENTER_NESTED was called on this vCPU (arch.doorbell_request would be 1) + * b) Doorbell was not handled and L2 exited for some other reason (hr->dpdes would be 1) + */ + vcpu->arch.doorbell_request = vcpu->arch.doorbell_request | hr->dpdes; vcpu->arch.hfscr = hr->hfscr; vcpu->arch.purr = hr->purr; vcpu->arch.spurr = hr->spurr; diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c index ad0cf3108dd0..65353cf2b0a8 100644 --- a/arch/powerpc/lib/code-patching.c +++ b/arch/powerpc/lib/code-patching.c @@ -53,7 +53,7 @@ static int text_area_cpu_up(unsigned int cpu) unsigned long addr; int err; - area = get_vm_area(PAGE_SIZE, VM_ALLOC); + area = get_vm_area(PAGE_SIZE, 0); if (!area) { WARN_ONCE(1, "Failed to create text area for cpu %d\n", cpu); diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index ec30af8eadb7..289690814ad4 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -780,8 +780,8 @@ static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea, #endif /* __powerpc64 */ #ifdef CONFIG_VSX -void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, - const void *mem, bool rev) +static nokprobe_inline void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, + const void *mem, bool rev) { int size, read_size; int i, j; @@ -863,11 +863,9 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg, break; } } -EXPORT_SYMBOL_GPL(emulate_vsx_load); -NOKPROBE_SYMBOL(emulate_vsx_load); -void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, - void *mem, bool rev) +static nokprobe_inline void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, + void *mem, bool rev) { int size, write_size; int i, j; @@ -955,8 +953,6 @@ void 
emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg, break; } } -EXPORT_SYMBOL_GPL(emulate_vsx_store); -NOKPROBE_SYMBOL(emulate_vsx_store); static nokprobe_inline int do_vsx_load(struct instruction_op *op, unsigned long ea, struct pt_regs *regs, diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 644e4ec6ce99..4b59315cb820 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -431,10 +431,16 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, /* * The kernel should never take an execute fault nor should it * take a page fault to a kernel address or a page fault to a user - * address outside of dedicated places + * address outside of dedicated places. + * + * Rather than kfence directly reporting false negatives, search whether + * the NIP belongs to the fixup table for cases where fault could come + * from functions like copy_from_kernel_nofault(). */ if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) { - if (kfence_handle_page_fault(address, is_write, regs)) + if (is_kfence_address((void *)address) && + !search_exception_tables(instruction_pointer(regs)) && + kfence_handle_page_fault(address, is_write, regs)) return 0; return SIGSEGV; diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index e3c31c771ce9..470d7715ecf4 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2229,6 +2229,10 @@ static struct pmu power_pmu = { #define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \ PERF_SAMPLE_PHYS_ADDR | \ PERF_SAMPLE_DATA_PAGE_SIZE) + +#define SIER_TYPE_SHIFT 15 +#define SIER_TYPE_MASK (0x7ull << SIER_TYPE_SHIFT) + /* * A counter has overflowed; update its count and record * things if requested. Note that interrupts are hard-disabled @@ -2297,6 +2301,22 @@ static void record_and_restart(struct perf_event *event, unsigned long val, is_kernel_addr(mfspr(SPRN_SIAR))) record = 0; + /* + * SIER[46-48] presents instruction type of the sampled instruction. + * In ISA v3.0 and before values "0" and "7" are considered reserved. + * In ISA v3.1, value "7" has been used to indicate "larx/stcx". + * Drop the sample if "type" has reserved values for this field with a + * ISA version check. + */ + if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && + ppmu->get_mem_data_src) { + val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT; + if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) { + record = 0; + atomic64_inc(&event->lost_samples); + } + } + /* * Finally record data if requested. 
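The record_and_restart() hunk above extracts the 3-bit SIER type field from the saved register value and drops the sample when the field holds a reserved encoding. A stand-alone sketch of that extraction and validity rule; has_isa_v31 is an illustrative stand-in for cpu_has_feature(CPU_FTR_ARCH_31), not a kernel symbol:

#include <stdbool.h>
#include <stdio.h>

/* Field layout used by the hunk above (a 3-bit type field at bit 15). */
#define SIER_TYPE_SHIFT	15
#define SIER_TYPE_MASK	(0x7ull << SIER_TYPE_SHIFT)

/*
 * Type 0 is always reserved; type 7 is only meaningful on ISA v3.1,
 * where it indicates larx/stcx.  Anything else is accepted.
 */
static bool sier_type_is_valid(unsigned long long sier, bool has_isa_v31)
{
	unsigned long long type = (sier & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;

	if (type == 0)
		return false;
	if (type == 7 && !has_isa_v31)
		return false;
	return true;
}

int main(void)
{
	unsigned long long sier_type1 = 1ull << SIER_TYPE_SHIFT;
	unsigned long long sier_type7 = 7ull << SIER_TYPE_SHIFT;

	printf("type1, ISA v3.0: %d\n", sier_type_is_valid(sier_type1, false)); /* 1 */
	printf("type7, ISA v3.0: %d\n", sier_type_is_valid(sier_type7, false)); /* 0 */
	printf("type7, ISA v3.1: %d\n", sier_type_is_valid(sier_type7, true));  /* 1 */
	return 0;
}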
*/ diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index 56301b2bc8ae..031a2b63c171 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -321,8 +321,10 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags, sier = mfspr(SPRN_SIER); val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT; - if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) + if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) { + dsrc->val = 0; return; + } idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT; sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT; diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c index 92e60cb3163f..d954ddf7f059 100644 --- a/arch/powerpc/platforms/book3s/vas-api.c +++ b/arch/powerpc/platforms/book3s/vas-api.c @@ -464,7 +464,43 @@ static vm_fault_t vas_mmap_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } +/* + * During mmap() paste address, mapping VMA is saved in VAS window + * struct which is used to unmap during migration if the window is + * still open. But the user space can remove this mapping with + * munmap() before closing the window and the VMA address will + * be invalid. Set VAS window VMA to NULL in this function which + * is called before VMA free. + */ +static void vas_mmap_close(struct vm_area_struct *vma) +{ + struct file *fp = vma->vm_file; + struct coproc_instance *cp_inst = fp->private_data; + struct vas_window *txwin; + + /* Should not happen */ + if (!cp_inst || !cp_inst->txwin) { + pr_err("No attached VAS window for the paste address mmap\n"); + return; + } + + txwin = cp_inst->txwin; + /* + * task_ref.vma is set in coproc_mmap() during mmap paste + * address. So it has to be the same VMA that is getting freed. 
+ */ + if (WARN_ON(txwin->task_ref.vma != vma)) { + pr_err("Invalid paste address mmaping\n"); + return; + } + + mutex_lock(&txwin->task_ref.mmap_mutex); + txwin->task_ref.vma = NULL; + mutex_unlock(&txwin->task_ref.mmap_mutex); +} + static const struct vm_operations_struct vas_vm_ops = { + .close = vas_mmap_close, .fault = vas_mmap_fault, }; diff --git a/arch/powerpc/platforms/cell/spufs/gang.c b/arch/powerpc/platforms/cell/spufs/gang.c index 827d338deaf4..2c2999de6bfa 100644 --- a/arch/powerpc/platforms/cell/spufs/gang.c +++ b/arch/powerpc/platforms/cell/spufs/gang.c @@ -25,6 +25,7 @@ struct spu_gang *alloc_spu_gang(void) mutex_init(&gang->aff_mutex); INIT_LIST_HEAD(&gang->list); INIT_LIST_HEAD(&gang->aff_list_head); + gang->alive = 1; out: return gang; diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index dbcfe361831a..d11951586e1f 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -191,13 +191,32 @@ static int spufs_fill_dir(struct dentry *dir, return -ENOMEM; ret = spufs_new_file(dir->d_sb, dentry, files->ops, files->mode & mode, files->size, ctx); - if (ret) + if (ret) { + dput(dentry); return ret; + } files++; } return 0; } +static void unuse_gang(struct dentry *dir) +{ + struct inode *inode = dir->d_inode; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; + + if (gang) { + bool dead; + + inode_lock(inode); // exclusion with spufs_create_context() + dead = !--gang->alive; + inode_unlock(inode); + + if (dead) + simple_recursive_removal(dir, NULL); + } +} + static int spufs_dir_close(struct inode *inode, struct file *file) { struct inode *parent; @@ -212,6 +231,7 @@ static int spufs_dir_close(struct inode *inode, struct file *file) inode_unlock(parent); WARN_ON(ret); + unuse_gang(dir->d_parent); return dcache_dir_close(inode, file); } @@ -404,7 +424,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, { int ret; int affinity; - struct spu_gang *gang; + struct spu_gang *gang = SPUFS_I(inode)->i_gang; struct spu_context *neighbor; struct path path = {.mnt = mnt, .dentry = dentry}; @@ -419,11 +439,15 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader) return -ENODEV; - gang = NULL; + if (gang) { + if (!gang->alive) + return -ENOENT; + gang->alive++; + } + neighbor = NULL; affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU); if (affinity) { - gang = SPUFS_I(inode)->i_gang; if (!gang) return -EINVAL; mutex_lock(&gang->aff_mutex); @@ -435,8 +459,11 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, } ret = spufs_mkdir(inode, dentry, flags, mode & 0777); - if (ret) + if (ret) { + if (neighbor) + put_spu_context(neighbor); goto out_aff_unlock; + } if (affinity) { spufs_set_affinity(flags, SPUFS_I(d_inode(dentry))->i_ctx, @@ -452,6 +479,8 @@ spufs_create_context(struct inode *inode, struct dentry *dentry, out_aff_unlock: if (affinity) mutex_unlock(&gang->aff_mutex); + if (ret && gang) + gang->alive--; // can't reach 0 return ret; } @@ -481,6 +510,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode) inode->i_fop = &simple_dir_operations; d_instantiate(dentry, inode); + dget(dentry); inc_nlink(dir); inc_nlink(d_inode(dentry)); return ret; @@ -491,6 +521,21 @@ out: return ret; } +static int spufs_gang_close(struct inode *inode, struct file *file) +{ + unuse_gang(file->f_path.dentry); + return dcache_dir_close(inode, file); +} + +static const struct 
file_operations spufs_gang_fops = { + .open = dcache_dir_open, + .release = spufs_gang_close, + .llseek = dcache_dir_lseek, + .read = generic_read_dir, + .iterate_shared = dcache_readdir, + .fsync = noop_fsync, +}; + static int spufs_gang_open(const struct path *path) { int ret; @@ -510,7 +555,7 @@ static int spufs_gang_open(const struct path *path) return PTR_ERR(filp); } - filp->f_op = &simple_dir_operations; + filp->f_op = &spufs_gang_fops; fd_install(ret, filp); return ret; } @@ -525,10 +570,8 @@ static int spufs_create_gang(struct inode *inode, ret = spufs_mkgang(inode, dentry, mode & 0777); if (!ret) { ret = spufs_gang_open(&path); - if (ret < 0) { - int err = simple_rmdir(inode, dentry); - WARN_ON(err); - } + if (ret < 0) + unuse_gang(dentry); } return ret; } diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 84958487f696..d33787c57c39 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -151,6 +151,8 @@ struct spu_gang { int aff_flags; struct spu *aff_ref_spu; atomic_t aff_sched_count; + + int alive; }; /* Flag bits for spu_gang aff_flags */ diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index 3f1cdccebc9c..ecc04ef8c53e 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c @@ -191,7 +191,7 @@ static int dtl_enable(struct dtl *dtl) return -EBUSY; /* ensure there are no other conflicting dtl users */ - if (!read_trylock(&dtl_access_lock)) + if (!down_read_trylock(&dtl_access_lock)) return -EBUSY; n_entries = dtl_buf_entries; @@ -199,7 +199,7 @@ static int dtl_enable(struct dtl *dtl) if (!buf) { printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", __func__, dtl->cpu); - read_unlock(&dtl_access_lock); + up_read(&dtl_access_lock); return -ENOMEM; } @@ -217,7 +217,7 @@ static int dtl_enable(struct dtl *dtl) spin_unlock(&dtl->lock); if (rc) { - read_unlock(&dtl_access_lock); + up_read(&dtl_access_lock); kmem_cache_free(dtl_cache, buf); } @@ -232,7 +232,7 @@ static void dtl_disable(struct dtl *dtl) dtl->buf = NULL; dtl->buf_entries = 0; spin_unlock(&dtl->lock); - read_unlock(&dtl_access_lock); + up_read(&dtl_access_lock); } /* file interface */ diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index e5a58a9b2fe9..ce6759e082a7 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c @@ -580,8 +580,10 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay) switch(rets[0]) { case 0: - result = EEH_STATE_MMIO_ACTIVE | - EEH_STATE_DMA_ACTIVE; + result = EEH_STATE_MMIO_ACTIVE | + EEH_STATE_DMA_ACTIVE | + EEH_STATE_MMIO_ENABLED | + EEH_STATE_DMA_ENABLED; break; case 1: result = EEH_STATE_RESET_ACTIVE | diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 29d235b02f06..98f8e0a39eb2 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -167,7 +167,7 @@ struct vcpu_dispatch_data { */ #define NR_CPUS_H NR_CPUS -DEFINE_RWLOCK(dtl_access_lock); +DECLARE_RWSEM(dtl_access_lock); static DEFINE_PER_CPU(struct vcpu_dispatch_data, vcpu_disp_data); static DEFINE_PER_CPU(u64, dtl_entry_ridx); static DEFINE_PER_CPU(struct dtl_worker, dtl_workers); @@ -461,7 +461,7 @@ static int dtl_worker_enable(unsigned long *time_limit) { int rc = 0, state; - if (!write_trylock(&dtl_access_lock)) { + if 
(!down_write_trylock(&dtl_access_lock)) { rc = -EBUSY; goto out; } @@ -477,7 +477,7 @@ static int dtl_worker_enable(unsigned long *time_limit) pr_err("vcpudispatch_stats: unable to setup workqueue for DTL processing\n"); free_dtl_buffers(time_limit); reset_global_dtl_mask(); - write_unlock(&dtl_access_lock); + up_write(&dtl_access_lock); rc = -EINVAL; goto out; } @@ -492,7 +492,7 @@ static void dtl_worker_disable(unsigned long *time_limit) cpuhp_remove_state(dtl_worker_state); free_dtl_buffers(time_limit); reset_global_dtl_mask(); - write_unlock(&dtl_access_lock); + up_write(&dtl_access_lock); } static ssize_t vcpudispatch_stats_write(struct file *file, const char __user *p, diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index dcf1bc9de584..3d97c951a384 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -82,7 +82,7 @@ struct dyn_arch_ftrace { #define make_call_t0(caller, callee, call) \ do { \ unsigned int offset = \ - (unsigned long) callee - (unsigned long) caller; \ + (unsigned long) (callee) - (unsigned long) (caller); \ call[0] = to_auipc_t0(offset); \ call[1] = to_jalr_t0(offset); \ } while (0) @@ -98,7 +98,7 @@ do { \ #define make_call_ra(caller, callee, call) \ do { \ unsigned int offset = \ - (unsigned long) callee - (unsigned long) caller; \ + (unsigned long) (callee) - (unsigned long) (caller); \ call[0] = to_auipc_ra(offset); \ call[1] = to_jalr_ra(offset); \ } while (0) diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h index fc8130f995c1..6907c456ac8c 100644 --- a/arch/riscv/include/asm/futex.h +++ b/arch/riscv/include/asm/futex.h @@ -93,7 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp) - : [ov] "Jr" (oldval), [nv] "Jr" (newval) + : [ov] "Jr" ((long)(int)oldval), [nv] "Jr" (newval) : "memory"); __disable_user_access(); diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h index 46677daf708b..cc11c4544cff 100644 --- a/arch/riscv/include/asm/kgdb.h +++ b/arch/riscv/include/asm/kgdb.h @@ -19,16 +19,9 @@ #ifndef __ASSEMBLY__ +void arch_kgdb_breakpoint(void); extern unsigned long kgdb_compiled_break; -static inline void arch_kgdb_breakpoint(void) -{ - asm(".global kgdb_compiled_break\n" - ".option norvc\n" - "kgdb_compiled_break: ebreak\n" - ".option rvc\n"); -} - #endif /* !__ASSEMBLY__ */ #define DBG_REG_ZERO "zero" diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 86048c60f700..fd861ac47a89 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -115,6 +115,7 @@ struct kernel_mapping { extern struct kernel_mapping kernel_map; extern phys_addr_t phys_ram_base; +extern unsigned long vmemmap_start_pfn; #define is_kernel_mapping(x) \ ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size)) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 7d1688f850c3..bb19a643c5c2 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -79,7 +79,7 @@ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled. 
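The dtl.c and lpar.c hunks above switch dtl_access_lock from an rwlock_t to an rw_semaphore, presumably because the paths that hold it (buffer allocation, cpuhp state setup) may sleep, which a spinning rwlock does not permit. A condensed sketch of the resulting usage pattern, not the full DTL logic:

#include <linux/rwsem.h>
#include <linux/errno.h>

static DECLARE_RWSEM(dtl_access_lock);

/* Per-CPU debugfs users share the log: take the semaphore for read. */
static int dtl_style_reader_enable(void)
{
	if (!down_read_trylock(&dtl_access_lock))
		return -EBUSY;	/* a conflicting user already owns it */
	/* ... allocate and register the dispatch trace log buffer ... */
	return 0;
}

static void dtl_style_reader_disable(void)
{
	/* ... tear down the buffer ... */
	up_read(&dtl_access_lock);
}

/* The vcpudispatch_stats worker needs the log exclusively: take it for write. */
static int dtl_style_writer_enable(void)
{
	if (!down_write_trylock(&dtl_access_lock))
		return -EBUSY;
	/* ... set up global DTL processing, may sleep ... */
	return 0;
}

static void dtl_style_writer_disable(void)
{
	up_write(&dtl_access_lock);
}

Both sides keep the trylock form of the original code, so a conflicting user still gets -EBUSY immediately instead of blocking.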
*/ -#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT)) +#define vmemmap ((struct page *)VMEMMAP_START - vmemmap_start_pfn) #define PCI_IO_SIZE SZ_16M #define PCI_IO_END VMEMMAP_START diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 384a63b86420..8426c4510d31 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -61,8 +61,11 @@ static inline void syscall_get_arguments(struct task_struct *task, unsigned long *args) { args[0] = regs->orig_a0; - args++; - memcpy(args, ®s->a1, 5 * sizeof(args[0])); + args[1] = regs->a1; + args[2] = regs->a2; + args[3] = regs->a3; + args[4] = regs->a4; + args[5] = regs->a5; } static inline int syscall_get_arch(struct task_struct *task) diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c index 963ed7edcff2..1d83b3696721 100644 --- a/arch/riscv/kernel/kgdb.c +++ b/arch/riscv/kernel/kgdb.c @@ -273,6 +273,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) regs->epc = pc; } +noinline void arch_kgdb_breakpoint(void) +{ + asm(".global kgdb_compiled_break\n" + "kgdb_compiled_break: ebreak\n"); +} + void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer, char *remcom_out_buffer) { diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 2acf51c23567..f4c3252abce4 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -76,6 +76,9 @@ static struct resource bss_res = { .name = "Kernel bss", }; static struct resource elfcorehdr_res = { .name = "ELF Core hdr", }; #endif +static int num_standard_resources; +static struct resource *standard_resources; + static int __init add_resource(struct resource *parent, struct resource *res) { @@ -149,7 +152,7 @@ static void __init init_resources(void) struct resource *res = NULL; struct resource *mem_res = NULL; size_t mem_res_sz = 0; - int num_resources = 0, res_idx = 0; + int num_resources = 0, res_idx = 0, non_resv_res = 0; int ret = 0; /* + 1 as memblock_alloc() might increase memblock.reserved.cnt */ @@ -213,6 +216,7 @@ static void __init init_resources(void) /* Add /memory regions to the resource tree */ for_each_mem_region(region) { res = &mem_res[res_idx--]; + non_resv_res++; if (unlikely(memblock_is_nomap(region))) { res->name = "Reserved"; @@ -230,6 +234,9 @@ static void __init init_resources(void) goto error; } + num_standard_resources = non_resv_res; + standard_resources = &mem_res[res_idx + 1]; + /* Clean-up any unused pre-allocated resources */ if (res_idx >= 0) memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res)); @@ -241,6 +248,33 @@ static void __init init_resources(void) memblock_free(mem_res, mem_res_sz); } +static int __init reserve_memblock_reserved_regions(void) +{ + u64 i, j; + + for (i = 0; i < num_standard_resources; i++) { + struct resource *mem = &standard_resources[i]; + phys_addr_t r_start, r_end, mem_size = resource_size(mem); + + if (!memblock_is_region_reserved(mem->start, mem_size)) + continue; + + for_each_reserved_mem_range(j, &r_start, &r_end) { + resource_size_t start, end; + + start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start); + end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end); + + if (start > mem->end || end < mem->start) + continue; + + reserve_region_with_split(mem, start, end, "Reserved"); + } + } + + return 0; +} +arch_initcall(reserve_memblock_reserved_regions); static void __init parse_dtb(void) { diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 5d07f6b3ca32..7a6c02e17f4f 100644 --- 
a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -27,7 +27,7 @@ int show_unhandled_signals = 1; -static DEFINE_SPINLOCK(die_lock); +static DEFINE_RAW_SPINLOCK(die_lock); void die(struct pt_regs *regs, const char *str) { @@ -38,7 +38,7 @@ void die(struct pt_regs *regs, const char *str) oops_enter(); - spin_lock_irqsave(&die_lock, flags); + raw_spin_lock_irqsave(&die_lock, flags); console_verbose(); bust_spinlocks(1); @@ -55,7 +55,7 @@ void die(struct pt_regs *regs, const char *str) bust_spinlocks(0); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); - spin_unlock_irqrestore(&die_lock, flags); + raw_spin_unlock_irqrestore(&die_lock, flags); oops_exit(); if (in_interrupt()) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index ba2210b553f9..72b3462babbf 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -52,6 +53,13 @@ EXPORT_SYMBOL(pgtable_l5_enabled); phys_addr_t phys_ram_base __ro_after_init; EXPORT_SYMBOL(phys_ram_base); +#ifdef CONFIG_SPARSEMEM_VMEMMAP +#define VMEMMAP_ADDR_ALIGN (1ULL << SECTION_SIZE_BITS) + +unsigned long vmemmap_start_pfn __ro_after_init; +EXPORT_SYMBOL(vmemmap_start_pfn); +#endif + unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; EXPORT_SYMBOL(empty_zero_page); @@ -210,8 +218,12 @@ static void __init setup_bootmem(void) memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); phys_ram_end = memblock_end_of_DRAM(); - if (!IS_ENABLED(CONFIG_XIP_KERNEL)) + if (!IS_ENABLED(CONFIG_XIP_KERNEL)) { phys_ram_base = memblock_start_of_DRAM(); +#ifdef CONFIG_SPARSEMEM_VMEMMAP + vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT; +#endif +} /* * Reserve physical address space that would be mapped to virtual * addresses greater than (void *)(-PAGE_SIZE) because: @@ -946,6 +958,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom); phys_ram_base = CONFIG_PHYS_RAM_BASE; +#ifdef CONFIG_SPARSEMEM_VMEMMAP + vmemmap_start_pfn = round_down(phys_ram_base, VMEMMAP_ADDR_ALIGN) >> PAGE_SHIFT; +#endif kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE; kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_start); diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 5ed242897b0d..237f36d1e6c2 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -21,7 +21,7 @@ KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__ ifndef CONFIG_AS_IS_LLVM KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf)) endif -KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack +KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11 KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index eaeaeb3ff0be..752a2310f0d6 100644 --- a/arch/s390/include/asm/futex.h +++ b/arch/s390/include/asm/futex.h @@ -44,7 +44,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, break; case FUTEX_OP_ANDN: __futex_atomic_op("lr %2,%1\nnr %2,%5\n", - ret, oldval, newval, uaddr, oparg); + ret, oldval, newval, uaddr, ~oparg); break; case FUTEX_OP_XOR: __futex_atomic_op("lr %2,%1\nxr %2,%5\n", diff --git a/arch/s390/kernel/entry.S 
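The s390 futex.h hunk above fixes FUTEX_OP_ANDN: the inline assembly only performs an AND ("nr"), so the caller must hand it the complemented operand to get the intended old & ~oparg result. A tiny demonstration of the expected semantics:

#include <stdio.h>

/*
 * Expected FUTEX_OP_ANDN behaviour: the stored value becomes
 * oldval & ~oparg.  This is what the asm computes once it is
 * passed ~oparg instead of oparg.
 */
static unsigned int futex_andn(unsigned int oldval, unsigned int oparg)
{
	return oldval & ~oparg;
}

int main(void)
{
	unsigned int old = 0xffu, arg = 0x0fu;

	printf("ANDN(0x%02x, 0x%02x) = 0x%02x\n", old, arg, futex_andn(old, arg)); /* 0xf0 */
	return 0;
}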
b/arch/s390/kernel/entry.S index c9799dec9279..62b80616ca72 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -484,9 +484,13 @@ ENTRY(\name) ENDPROC(\name) .endm + .section .irqentry.text, "ax" + INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq + .section .kprobes.text, "ax" + /* * Load idle PSW. */ diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 6c8872f76fb3..66db12df6ed1 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -549,6 +549,12 @@ int __init arch_init_kprobes(void) return 0; } +int __init arch_populate_kprobe_blacklist(void) +{ + return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start, + (unsigned long)__irqentry_text_end); +} + int arch_trampoline_kprobe(struct kprobe *p) { return 0; diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index f3b0a106f722..46a1a85a0e44 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1896,7 +1896,9 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags) event->hw.state |= PERF_HES_STOPPED; if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) { - hw_perf_event_update(event, 1); + /* CPU hotplug off removes SDBs. No samples to extract. */ + if (cpuhw->flags & PMU_F_RESERVED) + hw_perf_event_update(event, 1); event->hw.state |= PERF_HES_UPTODATE; } perf_pmu_enable(event->pmu); diff --git a/arch/s390/kernel/syscalls/Makefile b/arch/s390/kernel/syscalls/Makefile index fb85e797946d..2bd7756288df 100644 --- a/arch/s390/kernel/syscalls/Makefile +++ b/arch/s390/kernel/syscalls/Makefile @@ -12,7 +12,7 @@ kapi-hdrs-y := $(kapi)/unistd_nr.h uapi-hdrs-y := $(uapi)/unistd_32.h uapi-hdrs-y += $(uapi)/unistd_64.h -targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y)) +targets += $(addprefix ../../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y)) PHONY += kapi uapi diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index 1d2aa448d103..bfedbd7fae3b 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -276,10 +276,10 @@ static void __init test_monitor_call(void) return; asm volatile( " mc 0,0\n" - "0: xgr %0,%0\n" + "0: lhi %[val],0\n" "1:\n" - EX_TABLE(0b,1b) - : "+d" (val)); + EX_TABLE(0b, 1b) + : [val] "+d" (val)); if (!val) panic("Monitor call doesn't work!\n"); } diff --git a/arch/s390/kvm/trace-s390.h b/arch/s390/kvm/trace-s390.h index 6f0209d45164..9c5f546a2e1a 100644 --- a/arch/s390/kvm/trace-s390.h +++ b/arch/s390/kvm/trace-s390.h @@ -56,7 +56,7 @@ TRACE_EVENT(kvm_s390_create_vcpu, __entry->sie_block = sie_block; ), - TP_printk("create cpu %d at 0x%pK, sie block at 0x%pK", + TP_printk("create cpu %d at 0x%p, sie block at 0x%p", __entry->id, __entry->vcpu, __entry->sie_block) ); @@ -255,7 +255,7 @@ TRACE_EVENT(kvm_s390_enable_css, __entry->kvm = kvm; ), - TP_printk("enabling channel I/O support (kvm @ %pK)\n", + TP_printk("enabling channel I/O support (kvm @ %p)\n", __entry->kvm) ); diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index d90c818a9ae7..30e22aa9415c 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c @@ -1331,8 +1331,14 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr) page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9); rcu_read_unlock(); if (page) { - if (page_ref_inc_return(page) == 2) - return page_to_virt(page); + if (page_ref_inc_return(page) == 2) { + if (page->index == addr) + return 
page_to_virt(page); + /* + * We raced with someone reusing + putting this vsie + * page before we grabbed it. + */ + } page_ref_dec(page); } @@ -1362,15 +1368,20 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr) kvm->arch.vsie.next++; kvm->arch.vsie.next %= nr_vcpus; } - radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9); + if (page->index != ULONG_MAX) + radix_tree_delete(&kvm->arch.vsie.addr_to_page, + page->index >> 9); } - page->index = addr; - /* double use of the same address */ + /* Mark it as invalid until it resides in the tree. */ + page->index = ULONG_MAX; + + /* Double use of the same address or allocation failure. */ if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) { page_ref_dec(page); mutex_unlock(&kvm->arch.vsie.mutex); return NULL; } + page->index = addr; mutex_unlock(&kvm->arch.vsie.mutex); vsie_page = page_to_virt(page); @@ -1463,7 +1474,9 @@ void kvm_s390_vsie_destroy(struct kvm *kvm) vsie_page = page_to_virt(page); release_gmap_shadow(vsie_page); /* free the radix tree entry */ - radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9); + if (page->index != ULONG_MAX) + radix_tree_delete(&kvm->arch.vsie.addr_to_page, + page->index >> 9); __free_page(page); } kvm->arch.vsie.page_count = 0; diff --git a/arch/s390/purgatory/Makefile b/arch/s390/purgatory/Makefile index 4cbf306b8181..be0626a07dd8 100644 --- a/arch/s390/purgatory/Makefile +++ b/arch/s390/purgatory/Makefile @@ -21,7 +21,7 @@ UBSAN_SANITIZE := n KASAN_SANITIZE := n KCSAN_SANITIZE := n -KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes +KBUILD_CFLAGS := -std=gnu11 -fno-strict-aliasing -Wall -Wstrict-prototypes KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding KBUILD_CFLAGS += -c -MD -Os -m64 -msoft-float -fno-common diff --git a/arch/sh/kernel/cpu/proc.c b/arch/sh/kernel/cpu/proc.c index a306bcd6b341..5f6d0e827bae 100644 --- a/arch/sh/kernel/cpu/proc.c +++ b/arch/sh/kernel/cpu/proc.c @@ -132,7 +132,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) static void *c_start(struct seq_file *m, loff_t *pos) { - return *pos < NR_CPUS ? cpu_data + *pos : NULL; + return *pos < nr_cpu_ids ? cpu_data + *pos : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 946f33c1b032..4b762c35b608 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -52,8 +52,10 @@ out: void arch_enter_lazy_mmu_mode(void) { - struct tlb_batch *tb = this_cpu_ptr(&tlb_batch); + struct tlb_batch *tb; + preempt_disable(); + tb = this_cpu_ptr(&tlb_batch); tb->active = 1; } @@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void) if (tb->tlb_nr) flush_tlb_pending(); tb->active = 0; + preempt_enable(); } static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, diff --git a/arch/um/Makefile b/arch/um/Makefile index 778c50f27399..25d0501549f5 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -155,5 +155,6 @@ MRPROPER_FILES += $(HOST_DIR)/include/generated archclean: @find . 
\( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \ -o -name '*.gcov' \) -type f -print | xargs rm -f + $(Q)$(MAKE) -f $(srctree)/Makefile ARCH=$(HEADER_ARCH) clean export HEADER_ARCH SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS DEV_NULL_PATH diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index cabcc501b448..de187e58dc2b 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -336,7 +336,7 @@ static struct platform_driver uml_net_driver = { static void net_device_release(struct device *dev) { - struct uml_net *device = dev_get_drvdata(dev); + struct uml_net *device = container_of(dev, struct uml_net, pdev.dev); struct net_device *netdev = device->dev; struct uml_net_private *lp = netdev_priv(netdev); diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 1203f5078cb5..18e85f929d70 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c @@ -799,7 +799,7 @@ static int ubd_open_dev(struct ubd *ubd_dev) static void ubd_device_release(struct device *dev) { - struct ubd *ubd_dev = dev_get_drvdata(dev); + struct ubd *ubd_dev = container_of(dev, struct ubd, pdev.dev); blk_mq_free_tag_set(&ubd_dev->tag_set); *ubd_dev = ((struct ubd) DEFAULT_UBD); diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 94a4dfac6c23..2baa8d4a33ed 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -823,7 +823,8 @@ static struct platform_driver uml_net_driver = { static void vector_device_release(struct device *dev) { - struct vector_device *device = dev_get_drvdata(dev); + struct vector_device *device = + container_of(dev, struct vector_device, pdev.dev); struct net_device *netdev = device->dev; list_del(&device->list); diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h index 0df646c6651e..3b382da2996f 100644 --- a/arch/um/include/shared/os.h +++ b/arch/um/include/shared/os.h @@ -211,7 +211,6 @@ extern int os_protect_memory(void *addr, unsigned long len, extern int os_unmap_memory(void *addr, int len); extern int os_drop_memory(void *addr, int length); extern int can_drop_memory(void); -extern int os_mincore(void *addr, unsigned long len); /* execvp.c */ extern int execvp_noalloc(char *buf, const char *file, char *const argv[]); diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile index 1c2d4b29a3d4..6407fc911007 100644 --- a/arch/um/kernel/Makefile +++ b/arch/um/kernel/Makefile @@ -17,7 +17,7 @@ extra-y := vmlinux.lds obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \ physmem.o process.o ptrace.o reboot.o sigio.o \ signal.o sysrq.o time.o tlb.o trap.o \ - um_arch.o umid.o maccess.o kmsg_dump.o capflags.o skas/ + um_arch.o umid.o kmsg_dump.o capflags.o skas/ obj-y += load_file.o obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c deleted file mode 100644 index 8ccd56813f68..000000000000 --- a/arch/um/kernel/maccess.c +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2013 Richard Weinberger - */ - -#include -#include -#include - -bool copy_from_kernel_nofault_allowed(const void *src, size_t size) -{ - void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE); - - if ((unsigned long)src < PAGE_SIZE || size <= 0) - return false; - if (os_mincore(psrc, size + src - psrc) <= 0) - return false; - return true; -} diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 38d5a71a579b..f6c766b2bdf5 100644 --- a/arch/um/kernel/mem.c +++ 
b/arch/um/kernel/mem.c @@ -68,6 +68,7 @@ void __init mem_init(void) map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0); memblock_free((void *)brk_end, uml_reserved - brk_end); uml_reserved = brk_end; + min_low_pfn = PFN_UP(__pa(uml_reserved)); /* this will put all low memory onto the freelists */ memblock_free_all(); diff --git a/arch/um/kernel/physmem.c b/arch/um/kernel/physmem.c index 91485119ae67..4339580f5a4f 100644 --- a/arch/um/kernel/physmem.c +++ b/arch/um/kernel/physmem.c @@ -80,10 +80,10 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end, unsigned long len, unsigned long long highmem) { unsigned long reserve = reserve_end - start; - long map_size = len - reserve; + unsigned long map_size = len - reserve; int err; - if(map_size <= 0) { + if (len <= reserve) { os_warn("Too few physical memory! Needed=%lu, given=%lu\n", reserve, len); exit(1); @@ -94,7 +94,7 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end, err = os_map_memory((void *) reserve_end, physmem_fd, reserve, map_size, 1, 1, 1); if (err < 0) { - os_warn("setup_physmem - mapping %ld bytes of memory at 0x%p " + os_warn("setup_physmem - mapping %lu bytes of memory at 0x%p " "failed - errno = %d\n", map_size, (void *) reserve_end, err); exit(1); diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index a351c87db248..c5281ce31685 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c @@ -397,6 +397,6 @@ int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu) { int cpu = current_thread_info()->cpu; - return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu); + return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu) == 0; } diff --git a/arch/um/kernel/sysrq.c b/arch/um/kernel/sysrq.c index 746715379f12..7e897e44a03d 100644 --- a/arch/um/kernel/sysrq.c +++ b/arch/um/kernel/sysrq.c @@ -53,5 +53,5 @@ void show_stack(struct task_struct *task, unsigned long *stack, } printk("%sCall Trace:\n", loglvl); - dump_trace(current, &stackops, (void *)loglvl); + dump_trace(task ?: current, &stackops, (void *)loglvl); } diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c index e52dd37ddadc..2686120ab232 100644 --- a/arch/um/os-Linux/process.c +++ b/arch/um/os-Linux/process.c @@ -223,57 +223,6 @@ out: return ok; } -static int os_page_mincore(void *addr) -{ - char vec[2]; - int ret; - - ret = mincore(addr, UM_KERN_PAGE_SIZE, vec); - if (ret < 0) { - if (errno == ENOMEM || errno == EINVAL) - return 0; - else - return -errno; - } - - return vec[0] & 1; -} - -int os_mincore(void *addr, unsigned long len) -{ - char *vec; - int ret, i; - - if (len <= UM_KERN_PAGE_SIZE) - return os_page_mincore(addr); - - vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); - if (!vec) - return -ENOMEM; - - ret = mincore(addr, UM_KERN_PAGE_SIZE, vec); - if (ret < 0) { - if (errno == ENOMEM || errno == EINVAL) - ret = 0; - else - ret = -errno; - - goto out; - } - - for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) { - if (!(vec[i] & 1)) { - ret = 0; - goto out; - } - } - - ret = 1; -out: - free(vec); - return ret; -} - void init_new_thread_signals(void) { set_handler(SIGSEGV); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 49cea5b81649..8e66bb443351 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -208,7 +208,7 @@ config X86 select HAVE_SAMPLE_FTRACE_DIRECT_MULTI if X86_64 select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS - select HAVE_EISA + select HAVE_EISA 
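The setup_physmem() hunk above makes map_size unsigned and moves the sanity check to len <= reserve: an unsigned subtraction that would go negative instead wraps to a huge value and can never test as <= 0. A small illustration of the pitfall (the sizes here are arbitrary):

#include <stdio.h>

int main(void)
{
	unsigned long len = 64UL << 20, reserve = 128UL << 20;	/* illustrative sizes */

	if (len <= reserve)
		printf("too little physical memory: need %lu, have %lu\n", reserve, len);
	else
		printf("map_size = %lu bytes\n", len - reserve);

	/* The naive unsigned subtraction wraps instead of going negative: */
	printf("len - reserve = %lu\n", len - reserve);
	return 0;
}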
if X86_32 select HAVE_EXIT_THREAD select HAVE_FAST_GUP select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE @@ -2506,7 +2506,8 @@ config CPU_IBPB_ENTRY depends on CPU_SUP_AMD && X86_64 default y help - Compile the kernel with support for the retbleed=ibpb mitigation. + Compile the kernel with support for the retbleed=ibpb and + spec_rstack_overflow={ibpb,ibpb-vmexit} mitigations. config CPU_IBRS_ENTRY bool "Enable IBRS on kernel entry" @@ -2574,6 +2575,17 @@ config MITIGATION_SPECTRE_BHI indirect branches. See +config MITIGATION_ITS + bool "Enable Indirect Target Selection mitigation" + depends on CPU_SUP_INTEL && X86_64 + depends on RETPOLINE && RETHUNK + default y + help + Enable Indirect Target Selection (ITS) mitigation. ITS is a bug in + BPU on some Intel CPUs that may allow Spectre V2 style attacks. If + disabled, mitigation cannot be enabled via cmdline. + See + endif config ARCH_HAS_ADD_PAGES diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 897f56533e6c..21e1a3b6639d 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -34,6 +34,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ # avoid errors with '-march=i386', and future flags may depend on the target to # be valid. KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS) +KBUILD_CFLAGS += -std=gnu11 KBUILD_CFLAGS += -fno-strict-aliasing -fPIE KBUILD_CFLAGS += -Wundef KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh index c9299aeb7333..3882ead513f7 100644 --- a/arch/x86/boot/genimage.sh +++ b/arch/x86/boot/genimage.sh @@ -22,6 +22,7 @@ # This script requires: # bash # syslinux +# genisoimage # mtools (for fdimage* and hdimage) # edk2/OVMF (for hdimage) # @@ -251,7 +252,9 @@ geniso() { cp "$isolinux" "$ldlinux" "$tmp_dir" cp "$FBZIMAGE" "$tmp_dir"/linux echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg - cp "${FDINITRDS[@]}" "$tmp_dir"/ + if [ ${#FDINITRDS[@]} -gt 0 ]; then + cp "${FDINITRDS[@]}" "$tmp_dir"/ + fi genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \ -quiet -o "$FIMAGE" -b isolinux.bin \ -c boot.cat -no-emul-boot -boot-load-size 4 \ diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S index cdf3215ec272..82727b4fae90 100644 --- a/arch/x86/crypto/aegis128-aesni-asm.S +++ b/arch/x86/crypto/aegis128-aesni-asm.S @@ -21,7 +21,7 @@ #define T1 %xmm7 #define STATEP %rdi -#define LEN %rsi +#define LEN %esi #define SRC %rdx #define DST %rcx @@ -76,32 +76,32 @@ SYM_FUNC_START_LOCAL(__load_partial) xor %r9d, %r9d pxor MSG, MSG - mov LEN, %r8 + mov LEN, %r8d and $0x1, %r8 jz .Lld_partial_1 - mov LEN, %r8 + mov LEN, %r8d and $0x1E, %r8 add SRC, %r8 mov (%r8), %r9b .Lld_partial_1: - mov LEN, %r8 + mov LEN, %r8d and $0x2, %r8 jz .Lld_partial_2 - mov LEN, %r8 + mov LEN, %r8d and $0x1C, %r8 add SRC, %r8 shl $0x10, %r9 mov (%r8), %r9w .Lld_partial_2: - mov LEN, %r8 + mov LEN, %r8d and $0x4, %r8 jz .Lld_partial_4 - mov LEN, %r8 + mov LEN, %r8d and $0x18, %r8 add SRC, %r8 shl $32, %r9 @@ -111,11 +111,11 @@ SYM_FUNC_START_LOCAL(__load_partial) .Lld_partial_4: movq %r9, MSG - mov LEN, %r8 + mov LEN, %r8d and $0x8, %r8 jz .Lld_partial_8 - mov LEN, %r8 + mov LEN, %r8d and $0x10, %r8 add SRC, %r8 pslldq $8, MSG @@ -139,7 +139,7 @@ SYM_FUNC_END(__load_partial) * %r10 */ SYM_FUNC_START_LOCAL(__store_partial) - mov LEN, %r8 + mov LEN, %r8d mov DST, %r9 movq T0, %r10 @@ -677,7 +677,7 @@ SYM_TYPED_FUNC_START(crypto_aegis128_aesni_dec_tail) 
call __store_partial /* mask with byte count: */ - movq LEN, T0 + movd LEN, T0 punpcklbw T0, T0 punpcklbw T0, T0 punpcklbw T0, T0 @@ -702,7 +702,8 @@ SYM_FUNC_END(crypto_aegis128_aesni_dec_tail) /* * void crypto_aegis128_aesni_final(void *state, void *tag_xor, - * u64 assoclen, u64 cryptlen); + * unsigned int assoclen, + * unsigned int cryptlen); */ SYM_FUNC_START(crypto_aegis128_aesni_final) FRAME_BEGIN @@ -715,8 +716,8 @@ SYM_FUNC_START(crypto_aegis128_aesni_final) movdqu 0x40(STATEP), STATE4 /* prepare length block: */ - movq %rdx, MSG - movq %rcx, T0 + movd %edx, MSG + movd %ecx, T0 pslldq $8, T0 pxor T0, MSG psllq $3, MSG /* multiply by 8 (to get bit count) */ diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h index f6907627172b..01e9593e2bd9 100644 --- a/arch/x86/entry/calling.h +++ b/arch/x86/entry/calling.h @@ -70,6 +70,8 @@ For 32-bit we have the following conventions - kernel is built with pushq %rsi /* pt_regs->si */ movq 8(%rsp), %rsi /* temporarily store the return address in %rsi */ movq %rdi, 8(%rsp) /* pt_regs->di (overwriting original return address) */ + /* We just clobbered the return address - use the IRET frame for unwinding: */ + UNWIND_HINT_IRET_REGS offset=3*8 .else pushq %rdi /* pt_regs->di */ pushq %rsi /* pt_regs->si */ diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S index f4419afc7147..bda217961172 100644 --- a/arch/x86/entry/entry.S +++ b/arch/x86/entry/entry.S @@ -16,7 +16,7 @@ SYM_FUNC_START(entry_ibpb) movl $MSR_IA32_PRED_CMD, %ecx - movl $PRED_CMD_IBPB, %eax + movl _ASM_RIP(x86_pred_cmd), %eax xorl %edx, %edx wrmsr diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index a114338380a6..4f5968810450 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -1559,7 +1559,9 @@ SYM_CODE_END(rewind_stack_and_make_dead) * ORC to unwind properly. * * The alignment is for performance and not for safety, and may be safely - * refactored in the future if needed. + * refactored in the future if needed. The .skips are for safety, to ensure + * that all RETs are in the second half of a cacheline to mitigate Indirect + * Target Selection, rather than taking the slowpath via its_return_thunk. */ SYM_FUNC_START(clear_bhb_loop) push %rbp @@ -1569,10 +1571,22 @@ SYM_FUNC_START(clear_bhb_loop) call 1f jmp 5f .align 64, 0xcc + /* + * Shift instructions so that the RET is in the upper half of the + * cacheline and don't take the slowpath to its_return_thunk. + */ + .skip 32 - (.Lret1 - 1f), 0xcc ANNOTATE_INTRA_FUNCTION_CALL 1: call 2f - RET +.Lret1: RET .align 64, 0xcc + /* + * As above shift instructions for RET at .Lret2 as well. + * + * This should be ideally be: .skip 32 - (.Lret2 - 2f), 0xcc + * but some Clang versions (e.g. 18) don't like this. 
+ */ + .skip 32 - 18, 0xcc 2: movl $5, %eax 3: jmp 4f nop @@ -1580,7 +1594,7 @@ SYM_FUNC_START(clear_bhb_loop) jnz 3b sub $1, %ecx jnz 1b - RET +.Lret2: RET 5: lfence pop %rbp RET diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index 3ac069a4559b..1282f1a70213 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -881,11 +881,12 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) static int amd_pmu_v2_handle_irq(struct pt_regs *regs) { struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + static atomic64_t status_warned = ATOMIC64_INIT(0); + u64 reserved, status, mask, new_bits, prev_bits; struct perf_sample_data data; struct hw_perf_event *hwc; struct perf_event *event; int handled = 0, idx; - u64 reserved, status, mask; bool pmu_enabled; /* @@ -952,7 +953,12 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) * the corresponding PMCs are expected to be inactive according to the * active_mask */ - WARN_ON(status > 0); + if (status > 0) { + prev_bits = atomic64_fetch_or(status, &status_warned); + // A new bit was set for the very first time. + new_bits = status & ~prev_bits; + WARN(new_bits, "New overflows for inactive PMCs: %llx\n", new_bits); + } /* Clear overflow and freeze bits */ amd_pmu_ack_global_status(~status); diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index 37cbbc5c659a..8c385e231e07 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -1216,7 +1216,8 @@ static __init int perf_ibs_op_init(void) if (ibs_caps & IBS_CAPS_OPCNTEXT) { perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK; perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK; - perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK; + perf_ibs_op.cnt_mask |= (IBS_OP_MAX_CNT_EXT_MASK | + IBS_OP_CUR_CNT_EXT_MASK); } if (ibs_caps & IBS_CAPS_ZEN4) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 39f86188be54..a8732dd9fedb 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -623,7 +623,7 @@ int x86_pmu_hw_config(struct perf_event *event) if (event->attr.type == event->pmu->type) event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; - if (event->attr.sample_period && x86_pmu.limit_period) { + if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) { s64 left = event->attr.sample_period; x86_pmu.limit_period(event, &left); if (left > event->attr.sample_period) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 27968d10dd0b..b380fc5ea088 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2689,28 +2689,33 @@ static u64 adl_update_topdown_event(struct perf_event *event) DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update); -static void intel_pmu_read_topdown_event(struct perf_event *event) -{ - struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); - - /* Only need to call update_topdown_event() once for group read. 
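The status_warned logic added to amd_pmu_v2_handle_irq() above replaces an unconditional WARN_ON() with a warn-once-per-bit scheme: atomic64_fetch_or() returns the previously recorded bits, so only bits seen for the first time trigger the warning. A user-space re-creation of the idea with C11 atomics (illustrative only, not the kernel API):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	static _Atomic uint64_t status_warned;

	/* Warn only the first time a given overflow bit is observed. */
	static void report_overflows(uint64_t status)
	{
		if (!status)
			return;

		uint64_t prev = atomic_fetch_or(&status_warned, status);
		uint64_t new_bits = status & ~prev;

		if (new_bits)
			printf("New overflows for inactive PMCs: %llx\n",
			       (unsigned long long)new_bits);
	}

	int main(void)
	{
		report_overflows(0x3);	/* warns: bits 0 and 1 are new */
		report_overflows(0x3);	/* silent: already reported */
		report_overflows(0x7);	/* warns only about bit 2 */
		return 0;
	}
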
*/ - if ((cpuc->txn_flags & PERF_PMU_TXN_READ) && - !is_slots_event(event)) - return; - - perf_pmu_disable(event->pmu); - static_call(intel_pmu_update_topdown_event)(event); - perf_pmu_enable(event->pmu); -} - static void intel_pmu_read_event(struct perf_event *event) { - if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) - intel_pmu_auto_reload_read(event); - else if (is_topdown_count(event)) - intel_pmu_read_topdown_event(event); - else - x86_perf_event_update(event); + if (event->hw.flags & (PERF_X86_EVENT_AUTO_RELOAD | PERF_X86_EVENT_TOPDOWN)) { + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + bool pmu_enabled = cpuc->enabled; + + /* Only need to call update_topdown_event() once for group read. */ + if (is_metric_event(event) && (cpuc->txn_flags & PERF_PMU_TXN_READ)) + return; + + cpuc->enabled = 0; + if (pmu_enabled) + intel_pmu_disable_all(); + + if (is_topdown_event(event)) + static_call(intel_pmu_update_topdown_event)(event); + else + intel_pmu_drain_pebs_buffer(); + + cpuc->enabled = pmu_enabled; + if (pmu_enabled) + intel_pmu_enable_all(0); + + return; + } + + x86_perf_event_update(event); } static void intel_pmu_enable_fixed(struct perf_event *event) @@ -2975,7 +2980,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) handled++; x86_pmu_handle_guest_pebs(regs, &data); - x86_pmu.drain_pebs(regs, &data); + static_call(x86_pmu_drain_pebs)(regs, &data); status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI; /* @@ -3852,6 +3857,85 @@ static inline bool intel_pmu_has_cap(struct perf_event *event, int idx) return test_bit(idx, (unsigned long *)&intel_cap->capabilities); } +static u64 intel_pmu_freq_start_period(struct perf_event *event) +{ + int type = event->attr.type; + u64 config, factor; + s64 start; + + /* + * The 127 is the lowest possible recommended SAV (sample after value) + * for a 4000 freq (default freq), according to the event list JSON file. + * Also, assume the workload is idle 50% time. + */ + factor = 64 * 4000; + if (type != PERF_TYPE_HARDWARE && type != PERF_TYPE_HW_CACHE) + goto end; + + /* + * The estimation of the start period in the freq mode is + * based on the below assumption. + * + * For a cycles or an instructions event, 1GHZ of the + * underlying platform, 1 IPC. The workload is idle 50% time. + * The start period = 1,000,000,000 * 1 / freq / 2. + * = 500,000,000 / freq + * + * Usually, the branch-related events occur less than the + * instructions event. According to the Intel event list JSON + * file, the SAV (sample after value) of a branch-related event + * is usually 1/4 of an instruction event. + * The start period of branch-related events = 125,000,000 / freq. + * + * The cache-related events occurs even less. The SAV is usually + * 1/20 of an instruction event. + * The start period of cache-related events = 25,000,000 / freq. 
+ */ + config = event->attr.config & PERF_HW_EVENT_MASK; + if (type == PERF_TYPE_HARDWARE) { + switch (config) { + case PERF_COUNT_HW_CPU_CYCLES: + case PERF_COUNT_HW_INSTRUCTIONS: + case PERF_COUNT_HW_BUS_CYCLES: + case PERF_COUNT_HW_STALLED_CYCLES_FRONTEND: + case PERF_COUNT_HW_STALLED_CYCLES_BACKEND: + case PERF_COUNT_HW_REF_CPU_CYCLES: + factor = 500000000; + break; + case PERF_COUNT_HW_BRANCH_INSTRUCTIONS: + case PERF_COUNT_HW_BRANCH_MISSES: + factor = 125000000; + break; + case PERF_COUNT_HW_CACHE_REFERENCES: + case PERF_COUNT_HW_CACHE_MISSES: + factor = 25000000; + break; + default: + goto end; + } + } + + if (type == PERF_TYPE_HW_CACHE) + factor = 25000000; +end: + /* + * Usually, a prime or a number with less factors (close to prime) + * is chosen as an SAV, which makes it less likely that the sampling + * period synchronizes with some periodic event in the workload. + * Minus 1 to make it at least avoiding values near power of twos + * for the default freq. + */ + start = DIV_ROUND_UP_ULL(factor, event->attr.sample_freq) - 1; + + if (start > x86_pmu.max_period) + start = x86_pmu.max_period; + + if (x86_pmu.limit_period) + x86_pmu.limit_period(event, &start); + + return start; +} + static int intel_pmu_hw_config(struct perf_event *event) { int ret = x86_pmu_hw_config(event); @@ -3863,6 +3947,12 @@ static int intel_pmu_hw_config(struct perf_event *event) if (ret) return ret; + if (event->attr.freq && event->attr.sample_freq) { + event->hw.sample_period = intel_pmu_freq_start_period(event); + event->hw.last_period = event->hw.sample_period; + local64_set(&event->hw.period_left, event->hw.sample_period); + } + if (event->attr.precise_ip) { if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT) return -EINVAL; @@ -4087,7 +4177,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data) arr[pebs_enable] = (struct perf_guest_switch_msr){ .msr = MSR_IA32_PEBS_ENABLE, .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask, - .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask, + .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable, }; if (arr[pebs_enable].host) { @@ -4582,8 +4672,11 @@ static void intel_pmu_cpu_starting(int cpu) init_debug_store_on_cpu(cpu); /* - * Deal with CPUs that don't clear their LBRs on power-up. + * Deal with CPUs that don't clear their LBRs on power-up, and that may + * even boot with LBRs enabled. */ + if (!static_cpu_has(X86_FEATURE_ARCH_LBR) && x86_pmu.lbr_nr) + msr_clear_bit(MSR_IA32_DEBUGCTLMSR, DEBUGCTLMSR_LBR_BIT); intel_pmu_lbr_reset(); cpuc->lbr_sel = NULL; @@ -5409,8 +5502,22 @@ default_is_visible(struct kobject *kobj, struct attribute *attr, int i) return attr->mode; } +static umode_t +td_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + /* + * Hide the perf metrics topdown events + * if the feature is not enumerated. + */ + if (x86_pmu.num_topdown_events) + return x86_pmu.intel_cap.perf_metrics ? attr->mode : 0; + + return attr->mode; +} + static struct attribute_group group_events_td = { .name = "events", + .is_visible = td_is_visible, }; static struct attribute_group group_events_mem = { @@ -5587,9 +5694,27 @@ static umode_t hybrid_format_is_visible(struct kobject *kobj, return (cpu >= 0) && (pmu->cpu_type & pmu_attr->pmu_type) ? 
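For concreteness, the start-period heuristic in intel_pmu_freq_start_period() above works out as follows at the default 4000 Hz sampling frequency, covering only the PERF_TYPE_HARDWARE cases in the switch; the trailing -1 is the prime-ish adjustment mentioned in the comment, and the clamping against x86_pmu.max_period and limit_period() is omitted here:

	#include <stdio.h>
	#include <stdint.h>

	/* Stand-in for DIV_ROUND_UP_ULL in this sketch. */
	static uint64_t div_round_up(uint64_t n, uint64_t d)
	{
		return (n + d - 1) / d;
	}

	int main(void)
	{
		uint64_t freq = 4000;	/* default perf sampling frequency */

		printf("cycles/instructions: %llu\n",
		       (unsigned long long)(div_round_up(500000000ULL, freq) - 1));	/* 124999 */
		printf("branch events:       %llu\n",
		       (unsigned long long)(div_round_up(125000000ULL, freq) - 1));	/* 31249 */
		printf("cache events:        %llu\n",
		       (unsigned long long)(div_round_up(25000000ULL, freq) - 1));	/* 6249 */
		return 0;
	}
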
attr->mode : 0; } +static umode_t hybrid_td_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *dev = kobj_to_dev(kobj); + struct x86_hybrid_pmu *pmu = + container_of(dev_get_drvdata(dev), struct x86_hybrid_pmu, pmu); + + if (!is_attr_for_this_pmu(kobj, attr)) + return 0; + + + /* Only the big core supports perf metrics */ + if (pmu->cpu_type == hybrid_big) + return pmu->intel_cap.perf_metrics ? attr->mode : 0; + + return attr->mode; +} + static struct attribute_group hybrid_group_events_td = { .name = "events", - .is_visible = hybrid_events_is_visible, + .is_visible = hybrid_td_is_visible, }; static struct attribute_group hybrid_group_events_mem = { diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 3ff38e7409e3..4b7faa3180e5 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -789,11 +789,11 @@ unlock: return 1; } -static inline void intel_pmu_drain_pebs_buffer(void) +void intel_pmu_drain_pebs_buffer(void) { struct perf_sample_data data; - x86_pmu.drain_pebs(NULL, &data); + static_call(x86_pmu_drain_pebs)(NULL, &data); } /* @@ -1149,8 +1149,10 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event) * + precise_ip < 2 for the non event IP * + For RTM TSX weight we need GPRs for the abort code. */ - gprs = (sample_type & PERF_SAMPLE_REGS_INTR) && - (attr->sample_regs_intr & PEBS_GP_REGS); + gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) && + (attr->sample_regs_intr & PEBS_GP_REGS)) || + ((sample_type & PERF_SAMPLE_REGS_USER) && + (attr->sample_regs_user & PEBS_GP_REGS)); tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) && ((attr->config & INTEL_ARCH_EVENT_MASK) == @@ -1792,7 +1794,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, regs->flags &= ~PERF_EFLAGS_EXACT; } - if (sample_type & PERF_SAMPLE_REGS_INTR) + if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) adaptive_pebs_save_regs(regs, gprs); } @@ -1902,15 +1904,6 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) return NULL; } -void intel_pmu_auto_reload_read(struct perf_event *event) -{ - WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)); - - perf_pmu_disable(event->pmu); - intel_pmu_drain_pebs_buffer(); - perf_pmu_enable(event->pmu); -} - /* * Special variant of intel_pmu_save_and_restart() for auto-reload. 
*/ diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 4110246aba12..7ee8dc80a359 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -827,11 +827,13 @@ static void pt_buffer_advance(struct pt_buffer *buf) buf->cur_idx++; if (buf->cur_idx == buf->cur->last) { - if (buf->cur == buf->last) + if (buf->cur == buf->last) { buf->cur = buf->first; - else + buf->wrapped = true; + } else { buf->cur = list_entry(buf->cur->list.next, struct topa, list); + } buf->cur_idx = 0; } } @@ -845,8 +847,11 @@ static void pt_buffer_advance(struct pt_buffer *buf) static void pt_update_head(struct pt *pt) { struct pt_buffer *buf = perf_get_aux(&pt->handle); + bool wrapped = buf->wrapped; u64 topa_idx, base, old; + buf->wrapped = false; + if (buf->single) { local_set(&buf->data_size, buf->output_off); return; @@ -864,7 +869,7 @@ static void pt_update_head(struct pt *pt) } else { old = (local64_xchg(&buf->head, base) & ((buf->nr_pages << PAGE_SHIFT) - 1)); - if (base < old) + if (base < old || (base == old && wrapped)) base += buf->nr_pages << PAGE_SHIFT; local_add(base - old, &buf->data_size); diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h index f5e46c04c145..a1b6c04b7f68 100644 --- a/arch/x86/events/intel/pt.h +++ b/arch/x86/events/intel/pt.h @@ -65,6 +65,7 @@ struct pt_pmu { * @head: logical write offset inside the buffer * @snapshot: if this is for a snapshot/overwrite counter * @single: use Single Range Output instead of ToPA + * @wrapped: buffer advance wrapped back to the first topa table * @stop_pos: STOP topa entry index * @intr_pos: INT topa entry index * @stop_te: STOP topa entry pointer @@ -82,6 +83,7 @@ struct pt_buffer { local64_t head; bool snapshot; bool single; + bool wrapped; long stop_pos, intr_pos; struct topa_entry *stop_te, *intr_te; void **data_pages; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index d081eb89ba12..831c4e3f371a 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -4656,28 +4656,28 @@ static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = { INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), /* Free-Running IIO BANDWIDTH IN Counters */ INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), - INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), - 
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), + INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.0517578125e-5"), INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), { /* end: all zeroes */ }, }; @@ -5250,37 +5250,6 @@ static struct freerunning_counters icx_iio_freerunning[] = { [ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets }, }; -static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = { - /* Free-Running IIO CLOCKS Counter */ - INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), - /* Free-Running IIO BANDWIDTH IN Counters */ - INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), - INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), - { /* end: all zeroes */ }, -}; - static struct intel_uncore_type icx_uncore_iio_free_running = { .name = "iio_free_running", .num_counters = 9, @@ -5288,7 +5257,7 @@ static struct intel_uncore_type icx_uncore_iio_free_running = { .num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX, .freerunning = icx_iio_freerunning, .ops = &skx_uncore_iio_freerunning_ops, - .event_descs = icx_uncore_iio_freerunning_events, + .event_descs = snr_uncore_iio_freerunning_events, .format_group = &skx_uncore_iio_freerunning_format_group, }; @@ -5857,69 +5826,13 @@ static struct freerunning_counters spr_iio_freerunning[] = { [SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 }, }; -static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = { - /* Free-Running IIO CLOCKS Counter */ - INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"), - /* Free-Running IIO BANDWIDTH IN Counters */ - INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"), - INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"), - 
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"), - /* Free-Running IIO BANDWIDTH OUT Counters */ - INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"), - INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"), - INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"), - INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"), - INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"), - INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"), - INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"), - INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"), - INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"), - INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"), - INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"), - { /* end: all zeroes */ }, -}; - static struct intel_uncore_type spr_uncore_iio_free_running = { .name = "iio_free_running", .num_counters = 17, .num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX, .freerunning = spr_iio_freerunning, .ops = &skx_uncore_iio_freerunning_ops, - .event_descs = spr_uncore_iio_freerunning_events, + .event_descs = snr_uncore_iio_freerunning_events, .format_group = &skx_uncore_iio_freerunning_format_group, }; diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 332d2e6d8ae4..a3e1da86181f 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1047,6 +1047,7 @@ extern struct x86_pmu x86_pmu __read_mostly; 
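About the .scale updates in the snr IIO free-running table above: 3.814697266e-6 is 4/2^20 and 3.0517578125e-5 is 32/2^20, i.e. the MiB conversion now treats each counter increment as 32 bytes rather than 4 (the 32-byte granularity is an inference from the constants, not something the hunk states). The arithmetic:

	#include <stdio.h>

	int main(void)
	{
		printf("%.9e\n", 4.0 / (1 << 20));	/* 3.814697266e-06, old scale */
		printf("%.10e\n", 32.0 / (1 << 20));	/* 3.0517578125e-05, new scale */
		return 0;
	}
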
DECLARE_STATIC_CALL(x86_pmu_set_period, *x86_pmu.set_period); DECLARE_STATIC_CALL(x86_pmu_update, *x86_pmu.update); +DECLARE_STATIC_CALL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs); static __always_inline struct x86_perf_task_context_opt *task_context_opt(void *ctx) { @@ -1535,7 +1536,7 @@ void intel_pmu_pebs_disable_all(void); void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in); -void intel_pmu_auto_reload_read(struct perf_event *event); +void intel_pmu_drain_pebs_buffer(void); void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 9542c582d546..fd3b73017738 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -5,6 +5,7 @@ #include #include #include +#include #define ALTINSTR_FLAG_INV (1 << 15) #define ALT_NOT(feat) ((feat) | ALTINSTR_FLAG_INV) @@ -81,6 +82,37 @@ extern void apply_ibt_endbr(s32 *start, s32 *end); struct module; +#ifdef CONFIG_MITIGATION_ITS +extern void its_init_mod(struct module *mod); +extern void its_fini_mod(struct module *mod); +extern void its_free_mod(struct module *mod); +extern u8 *its_static_thunk(int reg); +#else /* CONFIG_MITIGATION_ITS */ +static inline void its_init_mod(struct module *mod) { } +static inline void its_fini_mod(struct module *mod) { } +static inline void its_free_mod(struct module *mod) { } +static inline u8 *its_static_thunk(int reg) +{ + WARN_ONCE(1, "ITS not compiled in"); + + return NULL; +} +#endif + +#if defined(CONFIG_RETHUNK) && defined(CONFIG_OBJTOOL) +extern bool cpu_wants_rethunk(void); +extern bool cpu_wants_rethunk_at(void *addr); +#else +static __always_inline bool cpu_wants_rethunk(void) +{ + return false; +} +static __always_inline bool cpu_wants_rethunk_at(void *addr) +{ + return false; +} +#endif + #ifdef CONFIG_SMP extern void alternatives_smp_module_add(struct module *mod, char *name, void *locks, void *locks_end, diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index ed0eaf65c437..c8cdc69aae09 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -116,7 +116,10 @@ static inline bool amd_gart_present(void) #define amd_nb_num(x) 0 #define amd_nb_has_feature(x) false -#define node_to_amd_nb(x) NULL +static inline struct amd_northbridge *node_to_amd_nb(int node) +{ + return NULL; +} #define amd_gart_present(x) false #endif diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 35389b2af88e..0216f63a366b 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h @@ -81,22 +81,4 @@ do { \ #include -/* - * Make previous memory operations globally visible before - * a WRMSR. - * - * MFENCE makes writes visible, but only affects load/store - * instructions. WRMSR is unfortunately not a load/store - * instruction and is unaffected by MFENCE. The LFENCE ensures - * that the WRMSR is not reordered. - * - * Most WRMSRs are full serializing instructions themselves and - * do not require this barrier. This is only required for the - * IA32_TSC_DEADLINE and X2APIC MSRs. 
- */ -static inline void weak_wrmsr_fence(void) -{ - asm volatile("mfence; lfence" : : : "memory"); -} - #endif /* _ASM_X86_BARRIER_H */ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 91224a926137..28edef597282 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -311,6 +311,7 @@ #define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */ #define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ #define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */ +#define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ @@ -444,6 +445,7 @@ #define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */ #define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */ #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */ +#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 5) /* "" Use thunk for indirect branches in lower half of cacheline */ /* * BUG word(s) @@ -494,4 +496,6 @@ #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */ #define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */ #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ +#define X86_BUG_ITS X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */ +#define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index abc07d004589..29bef25ac77c 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -47,6 +47,7 @@ KVM_X86_OP(set_idt) KVM_X86_OP(get_gdt) KVM_X86_OP(set_gdt) KVM_X86_OP(sync_dirty_debug_regs) +KVM_X86_OP(set_dr6) KVM_X86_OP(set_dr7) KVM_X86_OP(cache_reg) KVM_X86_OP(get_rflags) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9de3db4a32f8..eb06c2f68314 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1499,6 +1499,7 @@ struct kvm_x86_ops { void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu); + void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value); void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value); void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h index 5d7494631ea9..c07c018a1c13 100644 --- a/arch/x86/include/asm/mmu.h +++ b/arch/x86/include/asm/mmu.h @@ -33,6 +33,8 @@ typedef struct { */ atomic64_t tlb_gen; + unsigned long next_trim_cpumask; + #ifdef CONFIG_MODIFY_LDT_SYSCALL struct rw_semaphore ldt_usr_sem; struct ldt_struct *ldt; diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index b8d40ddeab00..6f5c7584fe1e 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -106,6 +106,7 @@ static inline int init_new_context(struct task_struct *tsk, mm->context.ctx_id = 
atomic64_inc_return(&last_mm_ctx_id); atomic64_set(&mm->context.tlb_gen, 0); + mm->context.next_trim_cpumask = jiffies + HZ; #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS if (cpu_feature_enabled(X86_FEATURE_OSPKE)) { diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 681e8401b8a3..727947ed5e5e 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -185,6 +185,14 @@ * VERW clears CPU Register * File. */ +#define ARCH_CAP_ITS_NO BIT_ULL(62) /* + * Not susceptible to + * Indirect Target Selection. + * This bit is not set by + * HW, but is synthesized by + * VMMs for guests to know + * their affected status. + */ #define ARCH_CAP_XAPIC_DISABLE BIT(21) /* * IA32_XAPIC_DISABLE_STATUS MSR @@ -357,7 +365,8 @@ #define MSR_IA32_PASID_VALID BIT_ULL(31) /* DEBUGCTLMSR bits (others vary by model): */ -#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ +#define DEBUGCTLMSR_LBR_BIT 0 /* last branch recording */ +#define DEBUGCTLMSR_LBR (1UL << DEBUGCTLMSR_LBR_BIT) #define DEBUGCTLMSR_BTF_SHIFT 1 #define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */ #define DEBUGCTLMSR_BUS_LOCK_DETECT (1UL << 2) diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index 5c5f1e56c404..6f3d145670a9 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h @@ -59,6 +59,8 @@ int __register_nmi_handler(unsigned int, struct nmiaction *); void unregister_nmi_handler(unsigned int, const char *); +void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler); + void stop_nmi(void); void restart_nmi(void); void local_touch_nmi(void); diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index daf58a96e0a7..cccd5072e09f 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -119,9 +119,8 @@ .endm /* - * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call - * to the retpoline thunk with a CS prefix when the register requires - * a RAX prefix byte to encode. Also see apply_retpolines(). + * Emits a conditional CS prefix that is compatible with + * -mindirect-branch-cs-prefix. */ .macro __CS_PREFIX reg:req .irp rs,r8,r9,r10,r11,r12,r13,r14,r15 @@ -245,8 +244,12 @@ _ASM_PTR " 999b\n\t" \ ".popsection\n\t" +#define ITS_THUNK_SIZE 64 + typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE]; +typedef u8 its_thunk_t[ITS_THUNK_SIZE]; extern retpoline_thunk_t __x86_indirect_thunk_array[]; +extern its_thunk_t __x86_indirect_its_thunk_array[]; #ifdef CONFIG_RETHUNK extern void __x86_return_thunk(void); @@ -254,6 +257,12 @@ extern void __x86_return_thunk(void); static inline void __x86_return_thunk(void) {} #endif +#ifdef CONFIG_MITIGATION_ITS +extern void its_return_thunk(void); +#else +static inline void its_return_thunk(void) {} +#endif + extern void retbleed_return_thunk(void); extern void srso_return_thunk(void); extern void srso_alias_return_thunk(void); @@ -280,20 +289,23 @@ extern void (*x86_return_thunk)(void); #ifdef CONFIG_X86_64 +/* + * Emits a conditional CS prefix that is compatible with + * -mindirect-branch-cs-prefix. + */ +#define __CS_PREFIX(reg) \ + ".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \ + ".ifc \\rs," reg "\n" \ + ".byte 0x2e\n" \ + ".endif\n" \ + ".endr\n" + /* * Inline asm uses the %V modifier which is only in newer GCC * which is ensured when CONFIG_RETPOLINE is defined. 
*/ -# define CALL_NOSPEC \ - ALTERNATIVE_2( \ - ANNOTATE_RETPOLINE_SAFE \ - "call *%[thunk_target]\n", \ - "call __x86_indirect_thunk_%V[thunk_target]\n", \ - X86_FEATURE_RETPOLINE, \ - "lfence;\n" \ - ANNOTATE_RETPOLINE_SAFE \ - "call *%[thunk_target]\n", \ - X86_FEATURE_RETPOLINE_LFENCE) +#define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \ + "call __x86_indirect_thunk_%V[thunk_target]\n" # define THUNK_TARGET(addr) [thunk_target] "r" (addr) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 4d810b9478a4..fa3576ef19da 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -456,6 +456,7 @@ struct pebs_xmm { */ #define IBS_OP_CUR_CNT (0xFFF80ULL<<32) #define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32) +#define IBS_OP_CUR_CNT_EXT_MASK (0x7FULL<<52) #define IBS_OP_CNT_CTL (1ULL<<19) #define IBS_OP_VAL (1ULL<<18) #define IBS_OP_ENABLE (1ULL<<17) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 3ed6cc778503..d8fd09de63d5 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -199,6 +199,8 @@ static inline unsigned long long l1tf_pfn_limit(void) return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT); } +void init_cpu_devs(void); +void get_cpu_vendor(struct cpuinfo_x86 *c); extern void early_cpu_init(void); extern void identify_boot_cpu(void); extern void identify_secondary_cpu(struct cpuinfo_x86 *); @@ -868,4 +870,22 @@ bool arch_is_platform_page(u64 paddr); extern bool gds_ucode_mitigated(void); +/* + * Make previous memory operations globally visible before + * a WRMSR. + * + * MFENCE makes writes visible, but only affects load/store + * instructions. WRMSR is unfortunately not a load/store + * instruction and is unaffected by MFENCE. The LFENCE ensures + * that the WRMSR is not reordered. + * + * Most WRMSRs are full serializing instructions themselves and + * do not require this barrier. This is only required for the + * IA32_TSC_DEADLINE and X2APIC MSRs. + */ +static inline void weak_wrmsr_fence(void) +{ + alternative("mfence; lfence", "", ALT_NOT(X86_FEATURE_APIC_MSRS_FENCE)); +} + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h index cb0386fc4dc3..c648502e4535 100644 --- a/arch/x86/include/asm/spec-ctrl.h +++ b/arch/x86/include/asm/spec-ctrl.h @@ -4,6 +4,7 @@ #include #include +#include /* * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR @@ -76,6 +77,16 @@ static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL; } +/* + * This can be used in noinstr functions & should only be called in bare + * metal context. + */ +static __always_inline void __update_spec_ctrl(u64 val) +{ + __this_cpu_write(x86_spec_ctrl_current, val); + native_wrmsrl(MSR_IA32_SPEC_CTRL, val); +} + #ifdef CONFIG_SMP extern void speculative_store_bypass_ht_init(void); #else diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h index c2e322189f85..373aed0f1422 100644 --- a/arch/x86/include/asm/special_insns.h +++ b/arch/x86/include/asm/special_insns.h @@ -225,7 +225,7 @@ static inline void clwb(volatile void *__p) #define nop() asm volatile ("nop") -static inline void serialize(void) +static __always_inline void serialize(void) { /* Instruction opcode for SERIALIZE; supported in binutils >= 2.35. 
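The reworked CALL_NOSPEC above always emits a direct call to the retpoline thunk, preceded by __CS_PREFIX. Going by the .irp/.ifc list in that macro, the 0x2e (CS) prefix byte is emitted only when the target register is r8-r15, presumably so the call site stays the same length as the cs call *%reg / call *%reg sequence it may later be patched into. A toy table of which registers get the prefix:

	#include <stdio.h>

	int main(void)
	{
		const char *regs[] = { "rax", "rcx", "rdx", "rbx", "rsp", "rbp",
				       "rsi", "rdi", "r8",  "r9",  "r10", "r11",
				       "r12", "r13", "r14", "r15" };

		/* Only r8-r15 appear in the __CS_PREFIX .irp list. */
		for (int i = 0; i < 16; i++)
			printf("%-3s: %s\n", regs[i],
			       i >= 8 ? "0x2e prefix" : "no prefix");
		return 0;
	}
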
*/ asm volatile(".byte 0xf, 0x1, 0xe8" ::: "memory"); diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h index 343b722ccaf2..35d75c19b1e3 100644 --- a/arch/x86/include/asm/static_call.h +++ b/arch/x86/include/asm/static_call.h @@ -65,4 +65,19 @@ extern bool __static_call_fixup(void *tramp, u8 op, void *dest); +extern void __static_call_update_early(void *tramp, void *func); + +#define static_call_update_early(name, _func) \ +({ \ + typeof(&STATIC_CALL_TRAMP(name)) __F = (_func); \ + if (static_call_initialized) { \ + __static_call_update(&STATIC_CALL_KEY(name), \ + STATIC_CALL_TRAMP_ADDR(name), __F);\ + } else { \ + WRITE_ONCE(STATIC_CALL_KEY(name).func, _func); \ + __static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\ + __F); \ + } \ +}) + #endif /* _ASM_STATIC_CALL_H */ diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h index ab7382f92aff..96bda43538ee 100644 --- a/arch/x86/include/asm/sync_core.h +++ b/arch/x86/include/asm/sync_core.h @@ -8,7 +8,7 @@ #include #ifdef CONFIG_X86_32 -static inline void iret_to_self(void) +static __always_inline void iret_to_self(void) { asm volatile ( "pushfl\n\t" @@ -19,7 +19,7 @@ static inline void iret_to_self(void) : ASM_CALL_CONSTRAINT : : "memory"); } #else -static inline void iret_to_self(void) +static __always_inline void iret_to_self(void) { unsigned int tmp; @@ -55,7 +55,7 @@ static inline void iret_to_self(void) * Like all of Linux's memory ordering operations, this is a * compiler barrier as well. */ -static inline void sync_core(void) +static __always_inline void sync_core(void) { /* * The SERIALIZE instruction is the most straightforward way to diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index cda3118f3b27..5abe6c3d982a 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -208,6 +208,7 @@ struct flush_tlb_info { unsigned int initiating_cpu; u8 stride_shift; u8 freed_tables; + u8 trim_cpumask; }; void flush_tlb_local(void); @@ -227,7 +228,7 @@ void flush_tlb_multi(const struct cpumask *cpumask, flush_tlb_mm_range((vma)->vm_mm, start, end, \ ((vma)->vm_flags & VM_HUGETLB) \ ? huge_page_shift(hstate_vma(vma)) \ - : PAGE_SHIFT, false) + : PAGE_SHIFT, true) extern void flush_tlb_all(void); extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index e5e0fe10c692..9b4eddd5833a 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h @@ -39,9 +39,11 @@ #include #include #include +#include #include +#include #include #include #include @@ -86,11 +88,20 @@ struct xen_dm_op_buf; * there aren't more than 5 arguments...) 
*/ -extern struct { char _entry[32]; } hypercall_page[]; +void xen_hypercall_func(void); +DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func); -#define __HYPERCALL "call hypercall_page+%c[offset]" -#define __HYPERCALL_ENTRY(x) \ - [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0])) +#ifdef MODULE +#define __ADDRESSABLE_xen_hypercall +#else +#define __ADDRESSABLE_xen_hypercall __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall) +#endif + +#define __HYPERCALL \ + __ADDRESSABLE_xen_hypercall \ + "call __SCT__xen_hypercall" + +#define __HYPERCALL_ENTRY(x) "a" (x) #ifdef CONFIG_X86_32 #define __HYPERCALL_RETREG "eax" @@ -148,7 +159,7 @@ extern struct { char _entry[32]; } hypercall_page[]; __HYPERCALL_0ARG(); \ asm volatile (__HYPERCALL \ : __HYPERCALL_0PARAM \ - : __HYPERCALL_ENTRY(name) \ + : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \ : __HYPERCALL_CLOBBER0); \ (type)__res; \ }) @@ -159,7 +170,7 @@ extern struct { char _entry[32]; } hypercall_page[]; __HYPERCALL_1ARG(a1); \ asm volatile (__HYPERCALL \ : __HYPERCALL_1PARAM \ - : __HYPERCALL_ENTRY(name) \ + : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \ : __HYPERCALL_CLOBBER1); \ (type)__res; \ }) @@ -170,7 +181,7 @@ extern struct { char _entry[32]; } hypercall_page[]; __HYPERCALL_2ARG(a1, a2); \ asm volatile (__HYPERCALL \ : __HYPERCALL_2PARAM \ - : __HYPERCALL_ENTRY(name) \ + : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \ : __HYPERCALL_CLOBBER2); \ (type)__res; \ }) @@ -181,7 +192,7 @@ extern struct { char _entry[32]; } hypercall_page[]; __HYPERCALL_3ARG(a1, a2, a3); \ asm volatile (__HYPERCALL \ : __HYPERCALL_3PARAM \ - : __HYPERCALL_ENTRY(name) \ + : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \ : __HYPERCALL_CLOBBER3); \ (type)__res; \ }) @@ -192,7 +203,7 @@ extern struct { char _entry[32]; } hypercall_page[]; __HYPERCALL_4ARG(a1, a2, a3, a4); \ asm volatile (__HYPERCALL \ : __HYPERCALL_4PARAM \ - : __HYPERCALL_ENTRY(name) \ + : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name) \ : __HYPERCALL_CLOBBER4); \ (type)__res; \ }) @@ -206,12 +217,9 @@ xen_single_call(unsigned int call, __HYPERCALL_DECLS; __HYPERCALL_5ARG(a1, a2, a3, a4, a5); - if (call >= PAGE_SIZE / sizeof(hypercall_page[0])) - return -EINVAL; - - asm volatile(CALL_NOSPEC + asm volatile(__HYPERCALL : __HYPERCALL_5PARAM - : [thunk_target] "a" (&hypercall_page[call]) + : __HYPERCALL_ENTRY(call) : __HYPERCALL_CLOBBER5); return (long)__res; diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 69f85e274611..3bc6be6404d2 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include #include @@ -30,6 +32,8 @@ #include #include #include +#include +#include int __read_mostly alternatives_patched; @@ -396,6 +400,212 @@ static int emit_indirect(int op, int reg, u8 *bytes) return i; } +#ifdef CONFIG_MITIGATION_ITS + +#ifdef CONFIG_MODULES +static struct module *its_mod; +static void *its_page; +static unsigned int its_offset; + +/* Initialize a thunk with the "jmp *reg; int3" instructions. 
*/ +static void *its_init_thunk(void *thunk, int reg) +{ + u8 *bytes = thunk; + int i = 0; + + if (reg >= 8) { + bytes[i++] = 0x41; /* REX.B prefix */ + reg -= 8; + } + bytes[i++] = 0xff; + bytes[i++] = 0xe0 + reg; /* jmp *reg */ + bytes[i++] = 0xcc; + + return thunk; +} + +void its_init_mod(struct module *mod) +{ + if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) + return; + + mutex_lock(&text_mutex); + its_mod = mod; + its_page = NULL; +} + +void its_fini_mod(struct module *mod) +{ + if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) + return; + + WARN_ON_ONCE(its_mod != mod); + + its_mod = NULL; + its_page = NULL; + mutex_unlock(&text_mutex); + + for (int i = 0; i < mod->its_num_pages; i++) { + void *page = mod->its_page_array[i]; + set_memory_ro((unsigned long)page, 1); + set_memory_x((unsigned long)page, 1); + } +} + +void its_free_mod(struct module *mod) +{ + if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) + return; + + for (int i = 0; i < mod->its_num_pages; i++) { + void *page = mod->its_page_array[i]; + module_memfree(page); + } + kfree(mod->its_page_array); +} + +DEFINE_FREE(its_execmem, void *, if (_T) module_memfree(_T)); + +static void *its_alloc(void) +{ + void *page __free(its_execmem) = module_alloc(PAGE_SIZE); + + if (!page) + return NULL; + + if (its_mod) { + void *tmp = krealloc(its_mod->its_page_array, + (its_mod->its_num_pages+1) * sizeof(void *), + GFP_KERNEL); + if (!tmp) + return NULL; + + its_mod->its_page_array = tmp; + its_mod->its_page_array[its_mod->its_num_pages++] = page; + } + + return no_free_ptr(page); +} + +static void *its_allocate_thunk(int reg) +{ + int size = 3 + (reg / 8); + void *thunk; + + if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) { + its_page = its_alloc(); + if (!its_page) { + pr_err("ITS page allocation failed\n"); + return NULL; + } + memset(its_page, INT3_INSN_OPCODE, PAGE_SIZE); + its_offset = 32; + } + + /* + * If the indirect branch instruction will be in the lower half + * of a cacheline, then update the offset to reach the upper half. + */ + if ((its_offset + size - 1) % 64 < 32) + its_offset = ((its_offset - 1) | 0x3F) + 33; + + thunk = its_page + its_offset; + its_offset += size; + + set_memory_rw((unsigned long)its_page, 1); + thunk = its_init_thunk(thunk, reg); + set_memory_ro((unsigned long)its_page, 1); + set_memory_x((unsigned long)its_page, 1); + + return thunk; +} +#else /* CONFIG_MODULES */ + +static void *its_allocate_thunk(int reg) +{ + return NULL; +} + +#endif /* CONFIG_MODULES */ + +static int __emit_trampoline(void *addr, struct insn *insn, u8 *bytes, + void *call_dest, void *jmp_dest) +{ + u8 op = insn->opcode.bytes[0]; + int i = 0; + + /* + * Clang does 'weird' Jcc __x86_indirect_thunk_r11 conditional + * tail-calls. Deal with them. 
+ */ + if (is_jcc32(insn)) { + bytes[i++] = op; + op = insn->opcode.bytes[1]; + goto clang_jcc; + } + + if (insn->length == 6) + bytes[i++] = 0x2e; /* CS-prefix */ + + switch (op) { + case CALL_INSN_OPCODE: + __text_gen_insn(bytes+i, op, addr+i, + call_dest, + CALL_INSN_SIZE); + i += CALL_INSN_SIZE; + break; + + case JMP32_INSN_OPCODE: +clang_jcc: + __text_gen_insn(bytes+i, op, addr+i, + jmp_dest, + JMP32_INSN_SIZE); + i += JMP32_INSN_SIZE; + break; + + default: + WARN(1, "%pS %px %*ph\n", addr, addr, 6, addr); + return -1; + } + + WARN_ON_ONCE(i != insn->length); + + return i; +} + +static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes) +{ + u8 *thunk = __x86_indirect_its_thunk_array[reg]; + u8 *tmp = its_allocate_thunk(reg); + + if (tmp) + thunk = tmp; + + return __emit_trampoline(addr, insn, bytes, thunk, thunk); +} + +/* Check if an indirect branch is at ITS-unsafe address */ +static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg) +{ + if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) + return false; + + /* Indirect branch opcode is 2 or 3 bytes depending on reg */ + addr += 1 + reg / 8; + + /* Lower-half of the cacheline? */ + return !(addr & 0x20); +} + +u8 *its_static_thunk(int reg) +{ + u8 *thunk = __x86_indirect_its_thunk_array[reg]; + + return thunk; +} + +#endif + /* * Rewrite the compiler generated retpoline thunk calls. * @@ -466,6 +676,15 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes) bytes[i++] = 0xe8; /* LFENCE */ } +#ifdef CONFIG_MITIGATION_ITS + /* + * Check if the address of last byte of emitted-indirect is in + * lower-half of the cacheline. Such branches need ITS mitigation. + */ + if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + i, reg)) + return emit_its_trampoline(addr, insn, reg, bytes); +#endif + ret = emit_indirect(op, reg, bytes + i); if (ret < 0) return ret; @@ -537,6 +756,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end) #ifdef CONFIG_RETHUNK +bool cpu_wants_rethunk(void) +{ + return cpu_feature_enabled(X86_FEATURE_RETHUNK); +} + +bool cpu_wants_rethunk_at(void *addr) +{ + if (!cpu_feature_enabled(X86_FEATURE_RETHUNK)) + return false; + if (x86_return_thunk != its_return_thunk) + return true; + + return !((unsigned long)addr & 0x20); +} + /* * Rewrite the compiler generated return thunk tail-calls. * @@ -552,13 +786,12 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes) { int i = 0; - if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { - if (x86_return_thunk == __x86_return_thunk) - return -1; - + /* Patch the custom return thunks... */ + if (cpu_wants_rethunk_at(addr)) { i = JMP32_INSN_SIZE; __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i); } else { + /* ... or patch them out if not needed. */ bytes[i++] = RET_INSN_OPCODE; } @@ -914,6 +1147,8 @@ static noinline void __init int3_selftest(void) void __init alternative_instructions(void) { + u64 ibt; + int3_selftest(); /* @@ -951,6 +1186,9 @@ void __init alternative_instructions(void) */ paravirt_set_cap(); + /* Keep CET-IBT disabled until caller/callee are patched */ + ibt = ibt_save(); + /* * First patch paravirt functions, such that we overwrite the indirect * call with the direct call. @@ -972,6 +1210,8 @@ void __init alternative_instructions(void) apply_ibt_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end); + ibt_restore(ibt); + #ifdef CONFIG_SMP /* Patch to UP if other cpus not imminent. 
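Both cpu_wants_rethunk_at() and cpu_wants_indirect_its_thunk_at() above reduce to the same question: does the (last byte of the) instruction sit in the lower half, bytes 0-31, of its 64-byte cacheline? That is exactly what testing bit 5 of the address does. A minimal illustration:

	#include <stdio.h>

	/* Bit 5 clear <=> byte offset 0-31 within a 64-byte cacheline. */
	static int in_lower_half(unsigned long addr)
	{
		return !(addr & 0x20);
	}

	int main(void)
	{
		unsigned long addrs[] = { 0x1000, 0x101f, 0x1020, 0x103f, 0x1040 };

		for (int i = 0; i < 5; i++)
			printf("%#lx: byte %2lu of its line, %s half\n",
			       addrs[i], addrs[i] & 0x3f,
			       in_lower_half(addrs[i]) ? "lower" : "upper");
		return 0;
	}
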
*/ if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) { diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index e8cc042e4905..e5e7c43bf67b 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -342,7 +342,6 @@ bool __init early_is_amd_nb(u32 device) struct resource *amd_get_mmconfig_range(struct resource *res) { - u32 address; u64 base, msr; unsigned int segn_busn_bits; @@ -350,13 +349,11 @@ struct resource *amd_get_mmconfig_range(struct resource *res) boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return NULL; - /* assume all cpus from fam10h have mmconfig */ - if (boot_cpu_data.x86 < 0x10) + /* Assume CPUs from Fam10h have mmconfig, although not all VMs do */ + if (boot_cpu_data.x86 < 0x10 || + rdmsrl_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr)) return NULL; - address = MSR_FAM10H_MMIO_CONF_BASE; - rdmsrl(address, msr); - /* mmconfig is not enabled */ if (!(msr & FAM10H_MMIO_CONF_ENABLE)) return NULL; @@ -519,6 +516,10 @@ static __init void fix_erratum_688(void) static __init int init_amd_nbs(void) { + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return 0; + amd_cache_northbridges(); amd_cache_gart(); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 425092806f8f..9ac93b4ba67b 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -787,7 +787,7 @@ static void init_amd_k8(struct cpuinfo_x86 *c) * (model = 0x14) and later actually support it. * (AMD Erratum #110, docId: 25759). */ - if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { + if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) { clear_cpu_cap(c, X86_FEATURE_LAHF_LM); if (!rdmsrl_amd_safe(0xc001100d, &value)) { value &= ~BIT_64(32); @@ -1133,6 +1133,9 @@ static void init_amd(struct cpuinfo_x86 *c) if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has_amd_erratum(c, amd_erratum_1485)) msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT); + + /* AMD CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. 
*/ + clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index fa2045ad408a..7233474c798f 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -48,6 +48,7 @@ static void __init srbds_select_mitigation(void); static void __init l1d_flush_select_mitigation(void); static void __init gds_select_mitigation(void); static void __init srso_select_mitigation(void); +static void __init its_select_mitigation(void); /* The base value of the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; @@ -66,6 +67,14 @@ static DEFINE_MUTEX(spec_ctrl_mutex); void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; +static void __init set_return_thunk(void *thunk) +{ + if (x86_return_thunk != __x86_return_thunk) + pr_warn("x86/bugs: return thunk changed\n"); + + x86_return_thunk = thunk; +} + /* Update SPEC_CTRL MSR and its cached copy unconditionally */ static void update_spec_ctrl(u64 val) { @@ -92,7 +101,7 @@ void update_spec_ctrl_cond(u64 val) wrmsrl(MSR_IA32_SPEC_CTRL, val); } -u64 spec_ctrl_current(void) +noinstr u64 spec_ctrl_current(void) { return this_cpu_read(x86_spec_ctrl_current); } @@ -174,6 +183,7 @@ void __init cpu_select_mitigations(void) */ srso_select_mitigation(); gds_select_mitigation(); + its_select_mitigation(); } /* @@ -1081,7 +1091,7 @@ do_cmd_auto: setup_force_cpu_cap(X86_FEATURE_UNRET); if (IS_ENABLED(CONFIG_RETHUNK)) - x86_return_thunk = retbleed_return_thunk; + set_return_thunk(retbleed_return_thunk); if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) @@ -1092,6 +1102,8 @@ do_cmd_auto: case RETBLEED_MITIGATION_IBPB: setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); + mitigate_smt = true; /* * IBPB on entry already obviates the need for @@ -1101,8 +1113,6 @@ do_cmd_auto: setup_clear_cpu_cap(X86_FEATURE_UNRET); setup_clear_cpu_cap(X86_FEATURE_RETHUNK); - mitigate_smt = true; - /* * There is no need for RSB filling: entry_ibpb() ensures * all predictions, including the RSB, are invalidated, @@ -1142,6 +1152,116 @@ do_cmd_auto: pr_info("%s\n", retbleed_strings[retbleed_mitigation]); } +#undef pr_fmt +#define pr_fmt(fmt) "ITS: " fmt + +enum its_mitigation_cmd { + ITS_CMD_OFF, + ITS_CMD_ON, + ITS_CMD_VMEXIT, +}; + +enum its_mitigation { + ITS_MITIGATION_OFF, + ITS_MITIGATION_VMEXIT_ONLY, + ITS_MITIGATION_ALIGNED_THUNKS, +}; + +static const char * const its_strings[] = { + [ITS_MITIGATION_OFF] = "Vulnerable", + [ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected", + [ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks", +}; + +static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS; + +static enum its_mitigation_cmd its_cmd __ro_after_init = + IS_ENABLED(CONFIG_MITIGATION_ITS) ? 
ITS_CMD_ON : ITS_CMD_OFF; + +static int __init its_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) { + pr_err("Mitigation disabled at compile time, ignoring option (%s)", str); + return 0; + } + + if (!strcmp(str, "off")) { + its_cmd = ITS_CMD_OFF; + } else if (!strcmp(str, "on")) { + its_cmd = ITS_CMD_ON; + } else if (!strcmp(str, "force")) { + its_cmd = ITS_CMD_ON; + setup_force_cpu_bug(X86_BUG_ITS); + } else if (!strcmp(str, "vmexit")) { + its_cmd = ITS_CMD_VMEXIT; + } else { + pr_err("Ignoring unknown indirect_target_selection option (%s).", str); + } + + return 0; +} +early_param("indirect_target_selection", its_parse_cmdline); + +static void __init its_select_mitigation(void) +{ + enum its_mitigation_cmd cmd = its_cmd; + + if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) { + its_mitigation = ITS_MITIGATION_OFF; + return; + } + + /* Exit early to avoid irrelevant warnings */ + if (cmd == ITS_CMD_OFF) { + its_mitigation = ITS_MITIGATION_OFF; + goto out; + } + if (spectre_v2_enabled == SPECTRE_V2_NONE) { + pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n"); + its_mitigation = ITS_MITIGATION_OFF; + goto out; + } + if (!IS_ENABLED(CONFIG_RETPOLINE) || !IS_ENABLED(CONFIG_RETHUNK)) { + pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n"); + its_mitigation = ITS_MITIGATION_OFF; + goto out; + } + if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) { + pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n"); + its_mitigation = ITS_MITIGATION_OFF; + goto out; + } + if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) { + pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n"); + its_mitigation = ITS_MITIGATION_OFF; + goto out; + } + + switch (cmd) { + case ITS_CMD_OFF: + its_mitigation = ITS_MITIGATION_OFF; + break; + case ITS_CMD_VMEXIT: + if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) { + its_mitigation = ITS_MITIGATION_VMEXIT_ONLY; + goto out; + } + fallthrough; + case ITS_CMD_ON: + its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; + if (!boot_cpu_has(X86_FEATURE_RETPOLINE)) + setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS); + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + set_return_thunk(its_return_thunk); + break; + } +out: + pr_info("%s\n", its_strings[its_mitigation]); +} + #undef pr_fmt #define pr_fmt(fmt) "Spectre V2 : " fmt @@ -1262,9 +1382,13 @@ static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd; static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void) { + enum spectre_v2_user_cmd mode; char arg[20]; int ret, i; + mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? + SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE; + switch (spectre_v2_cmd) { case SPECTRE_V2_CMD_NONE: return SPECTRE_V2_USER_CMD_NONE; @@ -1277,7 +1401,7 @@ spectre_v2_parse_user_cmdline(void) ret = cmdline_find_option(boot_command_line, "spectre_v2_user", arg, sizeof(arg)); if (ret < 0) - return SPECTRE_V2_USER_CMD_AUTO; + return mode; for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { if (match_option(arg, ret, v2_user_options[i].option)) { @@ -1287,8 +1411,8 @@ spectre_v2_parse_user_cmdline(void) } } - pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); - return SPECTRE_V2_USER_CMD_AUTO; + pr_err("Unknown user space protection option (%s). 
Switching to default\n", arg); + return mode; } static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode) @@ -1553,7 +1677,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void) rrsba_disabled = true; } -static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode) +static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode) { /* * Similar to context switches, there are two types of RSB attacks @@ -1577,27 +1701,30 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_ */ switch (mode) { case SPECTRE_V2_NONE: - return; + break; - case SPECTRE_V2_EIBRS_LFENCE: case SPECTRE_V2_EIBRS: - if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { - setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); - pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); - } - return; - + case SPECTRE_V2_EIBRS_LFENCE: case SPECTRE_V2_EIBRS_RETPOLINE: + if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { + pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); + setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); + } + break; + case SPECTRE_V2_RETPOLINE: case SPECTRE_V2_LFENCE: case SPECTRE_V2_IBRS: + pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n"); + setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); - pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n"); - return; - } + break; - pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit"); - dump_stack(); + default: + pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n"); + dump_stack(); + break; + } } /* @@ -1653,10 +1780,11 @@ static void __init bhi_select_mitigation(void) return; } - if (spec_ctrl_bhi_dis()) + if (!IS_ENABLED(CONFIG_X86_64)) return; - if (!IS_ENABLED(CONFIG_X86_64)) + /* Mitigate in hardware if supported */ + if (spec_ctrl_bhi_dis()) return; /* Mitigate KVM by default */ @@ -1822,10 +1950,7 @@ static void __init spectre_v2_select_mitigation(void) * * FIXME: Is this pointless for retbleed-affected AMD? */ - setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); - pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); - - spectre_v2_determine_rsb_fill_type_at_vmexit(mode); + spectre_v2_select_rsb_mitigation(mode); /* * Retpoline protects the kernel, but doesn't protect firmware. IBRS @@ -2591,10 +2716,10 @@ static void __init srso_select_mitigation(void) if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); - x86_return_thunk = srso_alias_return_thunk; + set_return_thunk(srso_alias_return_thunk); } else { setup_force_cpu_cap(X86_FEATURE_SRSO); - x86_return_thunk = srso_return_thunk; + set_return_thunk(srso_return_thunk); } srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else { @@ -2607,6 +2732,7 @@ static void __init srso_select_mitigation(void) if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { if (has_microcode) { setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); srso_mitigation = SRSO_MITIGATION_IBPB; /* @@ -2616,6 +2742,13 @@ static void __init srso_select_mitigation(void) */ setup_clear_cpu_cap(X86_FEATURE_UNRET); setup_clear_cpu_cap(X86_FEATURE_RETHUNK); + + /* + * There is no need for RSB filling: entry_ibpb() ensures + * all predictions, including the RSB, are invalidated, + * regardless of IBPB implementation. 
+ */ + setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); } } else { pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); @@ -2624,8 +2757,8 @@ break; case SRSO_CMD_IBPB_ON_VMEXIT: - if (IS_ENABLED(CONFIG_CPU_SRSO)) { - if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) { + if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { + if (has_microcode) { setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; @@ -2637,9 +2770,9 @@ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); } } else { - pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); + pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); goto pred_cmd; - } + } break; default: @@ -2766,6 +2899,11 @@ static ssize_t rfds_show_state(char *buf) return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]); } +static ssize_t its_show_state(char *buf) +{ + return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]); +} + static char *stibp_state(void) { if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && @@ -2950,6 +3088,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_RFDS: return rfds_show_state(buf); + case X86_BUG_ITS: + return its_show_state(buf); + default: break; } @@ -3029,4 +3170,9 @@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attrib { return cpu_show_common(dev, attr, buf, X86_BUG_RFDS); } + +ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_ITS); +} #endif diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 66556833d7af..7cf43c6fb59d 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -801,7 +801,7 @@ void init_intel_cacheinfo(struct cpuinfo_x86 *c) cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); /* If bit 31 is set, this is an unknown format */ - for (j = 0 ; j < 3 ; j++) + for (j = 0 ; j < 4 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 7f922a359ccc..48cc1612df49 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -906,7 +906,7 @@ void detect_ht(struct cpuinfo_x86 *c) #endif } -static void get_cpu_vendor(struct cpuinfo_x86 *c) +void get_cpu_vendor(struct cpuinfo_x86 *c) { char *v = c->x86_vendor_id; int i; @@ -1251,6 +1251,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { #define GDS BIT(6) /* CPU is affected by Register File Data Sampling */ #define RFDS BIT(7) +/* CPU is affected by Indirect Target Selection */ +#define ITS BIT(8) +/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */ +#define ITS_NATIVE_ONLY BIT(9) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), @@ -1262,22 +1266,25 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), - VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0x5), MMIO | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), - VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), - VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xb), MMIO | RETBLEED | GDS | SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xc), MMIO | RETBLEED | GDS | SRBDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS), VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), - VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), - VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS), - VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), - VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), - VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), - VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), - VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY), + VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY), + VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED | ITS), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS), + VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY), + VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY), VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), - VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY), VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS), VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS), VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS), @@ -1341,6 +1348,32 @@ static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr) return cpu_matches(cpu_vuln_blacklist, RFDS); } +static bool __init vulnerable_to_its(u64 x86_arch_cap_msr) +{ + /* The "immunity" bit trumps everything else: */ + if (x86_arch_cap_msr & ARCH_CAP_ITS_NO) + return false; + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return false; + + /* None of the affected CPUs have BHI_CTRL */ + if (boot_cpu_has(X86_FEATURE_BHI_CTRL)) + return false; + + /* + * If a VMM did not expose ITS_NO, assume that a guest could + * be running on a vulnerable hardware or may migrate to such + * hardware. 
+ */ + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return true; + + if (cpu_matches(cpu_vuln_blacklist, ITS)) + return true; + + return false; +} + static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) { u64 x86_arch_cap_msr = x86_read_arch_cap_msr(); @@ -1455,9 +1488,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) if (vulnerable_to_rfds(x86_arch_cap_msr)) setup_force_cpu_bug(X86_BUG_RFDS); - /* When virtualized, eIBRS could be hidden, assume vulnerable */ - if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) && - !cpu_matches(cpu_vuln_whitelist, NO_BHI) && + /* + * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with + * BHI_NO still need to use the BHI mitigation to prevent Intra-mode + * attacks. When virtualized, eIBRS could be hidden, assume vulnerable. + */ + if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) && (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) || boot_cpu_has(X86_FEATURE_HYPERVISOR))) setup_force_cpu_bug(X86_BUG_BHI); @@ -1465,6 +1501,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET)) setup_force_cpu_bug(X86_BUG_IBPB_NO_RET); + if (vulnerable_to_its(x86_arch_cap_msr)) { + setup_force_cpu_bug(X86_BUG_ITS); + if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY)) + setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY); + } + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; @@ -1672,15 +1714,11 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) detect_nopl(); } -void __init early_cpu_init(void) +void __init init_cpu_devs(void) { const struct cpu_dev *const *cdev; int count = 0; -#ifdef CONFIG_PROCESSOR_SELECT - pr_info("KERNEL supported cpus:\n"); -#endif - for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { const struct cpu_dev *cpudev = *cdev; @@ -1688,20 +1726,30 @@ void __init early_cpu_init(void) break; cpu_devs[count] = cpudev; count++; + } +} + +void __init early_cpu_init(void) +{ +#ifdef CONFIG_PROCESSOR_SELECT + unsigned int i, j; + + pr_info("KERNEL supported cpus:\n"); +#endif + + init_cpu_devs(); #ifdef CONFIG_PROCESSOR_SELECT - { - unsigned int j; - - for (j = 0; j < 2; j++) { - if (!cpudev->c_ident[j]) - continue; - pr_info(" %s %s\n", cpudev->c_vendor, - cpudev->c_ident[j]); - } + for (i = 0; i < X86_VENDOR_NUM && cpu_devs[i]; i++) { + for (j = 0; j < 2; j++) { + if (!cpu_devs[i]->c_ident[j]) + continue; + pr_info(" %s %s\n", cpu_devs[i]->c_vendor, + cpu_devs[i]->c_ident[j]); } -#endif } +#endif + early_identify_cpu(&boot_cpu_data); } @@ -1883,6 +1931,13 @@ static void identify_cpu(struct cpuinfo_x86 *c) c->apicid = apic->phys_pkg_id(c->initial_apicid, 0); #endif + + /* + * Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and + * Hygon will clear it in ->c_init() below. + */ + set_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE); + /* * Vendor-specific initialization. 
In this section we * canonicalize the feature flags, meaning if there are diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 9651275aecd1..dfec2c61e354 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -153,8 +153,8 @@ static void geode_configure(void) u8 ccr3; local_irq_save(flags); - /* Suspend on halt power saving and enable #SUSP pin */ - setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x88); + /* Suspend on halt power saving */ + setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x08); ccr3 = getCx86(CX86_CCR3); setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 9e8380bd4fb9..8a80d5343f3a 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -347,6 +347,9 @@ static void init_hygon(struct cpuinfo_x86 *c) set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); check_null_seg_clears_base(c); + + /* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */ + clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE); } static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 32bd64017047..2c43a1423b09 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -784,26 +784,37 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) } #endif -#define TLB_INST_4K 0x01 -#define TLB_INST_4M 0x02 -#define TLB_INST_2M_4M 0x03 +#define TLB_INST_4K 0x01 +#define TLB_INST_4M 0x02 +#define TLB_INST_2M_4M 0x03 -#define TLB_INST_ALL 0x05 -#define TLB_INST_1G 0x06 +#define TLB_INST_ALL 0x05 +#define TLB_INST_1G 0x06 -#define TLB_DATA_4K 0x11 -#define TLB_DATA_4M 0x12 -#define TLB_DATA_2M_4M 0x13 -#define TLB_DATA_4K_4M 0x14 +#define TLB_DATA_4K 0x11 +#define TLB_DATA_4M 0x12 +#define TLB_DATA_2M_4M 0x13 +#define TLB_DATA_4K_4M 0x14 -#define TLB_DATA_1G 0x16 +#define TLB_DATA_1G 0x16 +#define TLB_DATA_1G_2M_4M 0x17 -#define TLB_DATA0_4K 0x21 -#define TLB_DATA0_4M 0x22 -#define TLB_DATA0_2M_4M 0x23 +#define TLB_DATA0_4K 0x21 +#define TLB_DATA0_4M 0x22 +#define TLB_DATA0_2M_4M 0x23 -#define STLB_4K 0x41 -#define STLB_4K_2M 0x42 +#define STLB_4K 0x41 +#define STLB_4K_2M 0x42 + +/* + * All of leaf 0x2's one-byte TLB descriptors imply the same number of + * entries for their respective TLB types. The 0x63 descriptor is an + * exception: it implies 4 dTLB entries for 1GB pages and 32 dTLB entries + * for 2MB or 4MB pages. Encode descriptor 0x63 dTLB entry count for + * 2MB/4MB pages here, as its count for dTLB 1GB pages is already in the + * intel_tlb_table[] mapping.
+ */ +#define TLB_0x63_2M_4M_ENTRIES 32 static const struct _tlb_table intel_tlb_table[] = { { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, @@ -825,7 +836,8 @@ static const struct _tlb_table intel_tlb_table[] = { { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" }, { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" }, - { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" }, + { 0x63, TLB_DATA_1G_2M_4M, 4, " TLB_DATA 1 GByte pages, 4-way set associative" + " (plus 32 entries TLB_DATA 2 MByte or 4 MByte pages, not encoded here)" }, { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" }, { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" }, { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" }, @@ -925,6 +937,12 @@ static void intel_tlb_lookup(const unsigned char desc) if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries; break; + case TLB_DATA_1G_2M_4M: + if (tlb_lld_2m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES) + tlb_lld_2m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES; + if (tlb_lld_4m[ENTRIES] < TLB_0x63_2M_4M_ENTRIES) + tlb_lld_4m[ENTRIES] = TLB_0x63_2M_4M_ENTRIES; + fallthrough; case TLB_DATA_1G: if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries) tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries; @@ -948,7 +966,7 @@ static void intel_detect_tlb(struct cpuinfo_x86 *c) cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); /* If bit 31 is set, this is an unknown format */ - for (j = 0 ; j < 3 ; j++) + for (j = 0 ; j < 4 ; j++) if (regs[j] & (1 << 31)) regs[j] = 0; @@ -1186,7 +1204,13 @@ static void __split_lock_reenable(struct work_struct *work) { sld_update_msr(true); } -static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable); +/* + * In order for each CPU to schedule its delayed work independently of the + * others, delayed work struct must be per-CPU. This is not required when + * sysctl_sld_mitigate is enabled because of the semaphore that limits + * the number of simultaneously scheduled delayed works to 1.
+ */ +static DEFINE_PER_CPU(struct delayed_work, sl_reenable); /* * If a CPU goes offline with pending delayed work to re-enable split lock @@ -1207,7 +1231,7 @@ static int splitlock_cpu_offline(unsigned int cpu) static void split_lock_warn(unsigned long ip) { - struct delayed_work *work; + struct delayed_work *work = NULL; int cpu; if (!current->reported_split_lock) @@ -1229,11 +1253,17 @@ static void split_lock_warn(unsigned long ip) if (down_interruptible(&buslock_sem) == -EINTR) return; work = &sl_reenable_unlock; - } else { - work = &sl_reenable; } cpu = get_cpu(); + + if (!work) { + work = this_cpu_ptr(&sl_reenable); + /* Deferred initialization of per-CPU struct */ + if (!work->work.func) + INIT_DELAYED_WORK(work, __split_lock_reenable); + } + schedule_delayed_work_on(cpu, work, 2); /* Disable split lock detection on this CPU to make progress */ diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 9a3092ec9b27..b8d6e616fc50 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -867,7 +867,7 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz return ret; } - for_each_node(nid) { + for_each_node_with_cpus(nid) { cpu = cpumask_first(cpumask_of_node(nid)); c = &cpu_data(cpu); diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 542b818c0d20..100fed2afbe7 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -152,6 +151,63 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs) hyperv_cleanup(); } #endif /* CONFIG_KEXEC_CORE */ + +static u64 hv_ref_counter_at_suspend; +static void (*old_save_sched_clock_state)(void); +static void (*old_restore_sched_clock_state)(void); + +/* + * Hyper-V clock counter resets during hibernation. Save and restore clock + * offset during suspend/resume, while also considering the time passed + * before suspend. This is to make sure that sched_clock using hv tsc page + * based clocksource, proceeds from where it left off during suspend and + * it shows correct time for the timestamps of kernel messages after resume. + */ +static void save_hv_clock_tsc_state(void) +{ + hv_ref_counter_at_suspend = hv_read_reference_counter(); +} + +static void restore_hv_clock_tsc_state(void) +{ + /* + * Adjust the offsets used by hv tsc clocksource to + * account for the time spent before hibernation. + * adjusted value = reference counter (time) at suspend + * - reference counter (time) now. + */ + hv_adj_sched_clock_offset(hv_ref_counter_at_suspend - hv_read_reference_counter()); +} + +/* + * Functions to override save_sched_clock_state and restore_sched_clock_state + * functions of x86_platform. The Hyper-V clock counter is reset during + * suspend-resume and the offset used to measure time needs to be + * corrected, post resume. 
+ */ +static void hv_save_sched_clock_state(void) +{ + old_save_sched_clock_state(); + save_hv_clock_tsc_state(); +} + +static void hv_restore_sched_clock_state(void) +{ + restore_hv_clock_tsc_state(); + old_restore_sched_clock_state(); +} + +static void __init x86_setup_ops_for_tsc_pg_clock(void) +{ + if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE)) + return; + + old_save_sched_clock_state = x86_platform.save_sched_clock_state; + x86_platform.save_sched_clock_state = hv_save_sched_clock_state; + + old_restore_sched_clock_state = x86_platform.restore_sched_clock_state; + x86_platform.restore_sched_clock_state = hv_restore_sched_clock_state; +} #endif /* CONFIG_HYPERV */ static uint32_t __init ms_hyperv_platform(void) @@ -404,16 +460,6 @@ static void __init ms_hyperv_init_platform(void) if (efi_enabled(EFI_BOOT)) x86_platform.get_nmi_reason = hv_get_nmi_reason; - /* - * Hyper-V VMs have a PIT emulation quirk such that zeroing the - * counter register during PIT shutdown restarts the PIT. So it - * continues to interrupt @18.2 HZ. Setting i8253_clear_counter - * to false tells pit_shutdown() not to zero the counter so that - * the PIT really is shutdown. Generation 2 VMs don't have a PIT, - * and setting this value has no effect. - */ - i8253_clear_counter_on_shutdown = false; - #if IS_ENABLED(CONFIG_HYPERV) /* * Setup the hook to get control post apic initialization. @@ -454,6 +500,7 @@ static void __init ms_hyperv_init_platform(void) /* Register Hyper-V specific clocksource */ hv_init_clocksource(); + x86_setup_ops_for_tsc_pg_clock(); #endif /* * TSC should be marked as unstable only after Hyper-V diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c index aa9b8b868867..afccb69cd9a2 100644 --- a/arch/x86/kernel/cpu/sgx/driver.c +++ b/arch/x86/kernel/cpu/sgx/driver.c @@ -150,13 +150,15 @@ int __init sgx_drv_init(void) u64 xfrm_mask; int ret; - if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) + if (!cpu_feature_enabled(X86_FEATURE_SGX_LC)) { + pr_info("SGX disabled: SGX launch control CPU feature is not available, /dev/sgx_enclave disabled.\n"); return -ENODEV; + } cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx); if (!(eax & 1)) { - pr_err("SGX disabled: SGX1 instruction support not available.\n"); + pr_info("SGX disabled: SGX1 instruction support not available, /dev/sgx_enclave disabled.\n"); return -ENODEV; } @@ -173,8 +175,10 @@ int __init sgx_drv_init(void) } ret = misc_register(&sgx_dev_enclave); - if (ret) + if (ret) { + pr_info("SGX disabled: Unable to register the /dev/sgx_enclave driver (%d).\n", ret); return ret; + } return 0; } diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c index da8b8ea6b063..9634ac0fef1d 100644 --- a/arch/x86/kernel/cpu/sgx/ioctl.c +++ b/arch/x86/kernel/cpu/sgx/ioctl.c @@ -64,6 +64,13 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs) struct file *backing; long ret; + /* + * ECREATE would detect this too, but checking here also ensures + * that the 'encl_size' calculations below can never overflow. 
+ */ + if (!is_power_of_2(secs->size)) + return -EINVAL; + va_page = sgx_encl_grow(encl, true); if (IS_ERR(va_page)) return PTR_ERR(va_page); diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index f18ca44c904b..52dc5839d1e8 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -195,6 +195,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, printk("%sCall Trace:\n", log_lvl); unwind_start(&state, task, regs, stack); + stack = stack ?: get_stack_pointer(task, regs); regs = unwind_get_entry_regs(&state, &partial); /* @@ -213,9 +214,7 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, * - hardirq stack * - entry stack */ - for (stack = stack ?: get_stack_pointer(task, regs); - stack; - stack = stack_info.next_sp) { + for (; stack; stack = stack_info.next_sp) { const char *stack_name; stack = PTR_ALIGN(stack, sizeof(long)); diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 993734e96615..b87a3f8ccb32 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -753,22 +753,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len) void __init e820__register_nosave_regions(unsigned long limit_pfn) { int i; - unsigned long pfn = 0; + u64 last_addr = 0; for (i = 0; i < e820_table->nr_entries; i++) { struct e820_entry *entry = &e820_table->entries[i]; - if (pfn < PFN_UP(entry->addr)) - register_nosave_region(pfn, PFN_UP(entry->addr)); - - pfn = PFN_DOWN(entry->addr + entry->size); - if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN) - register_nosave_region(PFN_UP(entry->addr), pfn); + continue; - if (pfn >= limit_pfn) - break; + if (last_addr < entry->addr) + register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr)); + + last_addr = entry->addr + entry->size; } + + register_nosave_region(PFN_DOWN(last_addr), limit_pfn); } #ifdef CONFIG_ACPI diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index f1446f532b17..80ba0f81a44a 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -220,7 +220,7 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) struct fpstate *fpstate; unsigned int size; - size = fpu_user_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64); + size = fpu_kernel_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64); fpstate = vzalloc(size); if (!fpstate) return false; @@ -232,8 +232,8 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu) fpstate->is_guest = true; gfpu->fpstate = fpstate; - gfpu->xfeatures = fpu_user_cfg.default_features; - gfpu->perm = fpu_user_cfg.default_features; + gfpu->xfeatures = fpu_kernel_cfg.default_features; + gfpu->perm = fpu_kernel_cfg.default_features; /* * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index ec51ce713dea..48cf91625a16 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -360,7 +360,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) goto fail; ip = trampoline + size; - if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) + if (cpu_wants_rethunk_at(ip)) __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE); else memcpy(ip, retq, sizeof(retq)); @@ -415,8 +415,6 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) /* ALLOC_TRAMP flags lets us know we created it */ ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP; - set_vm_flush_reset_perms(trampoline); - 
if (likely(system_state != SYSTEM_BOOTING)) set_memory_ro((unsigned long)trampoline, npages); set_memory_x((unsigned long)trampoline, npages); diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 2b7999a1a50a..cb9852ad6098 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -39,9 +40,16 @@ static bool __init use_pit(void) bool __init pit_timer_init(void) { - if (!use_pit()) + if (!use_pit()) { + /* + * Don't just ignore the PIT. Ensure it's stopped, because + * VMMs otherwise steal CPU time just to pointlessly waggle + * the (masked) IRQ. + */ + scoped_guard(irq) + clockevent_i8253_disable(); return false; - + } clockevent_i8253_init(true); global_clock_event = &i8253_clockevent; return true; diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 766ffe3ba313..439fdb3f5fdf 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -23,8 +23,10 @@ #include #include +#if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR) #define CREATE_TRACE_POINTS #include +#endif DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); EXPORT_PER_CPU_SYMBOL(irq_stat); diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 991f00c817e6..180c708879d2 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -427,7 +427,6 @@ void *alloc_insn_page(void) if (!page) return NULL; - set_vm_flush_reset_perms(page); /* * First make the page read-only, and only then make it executable to * prevent it from being W+X in between. diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index c032edcd3d95..c34ea5e028c4 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -74,10 +74,11 @@ void *module_alloc(unsigned long size) return NULL; p = __vmalloc_node_range(size, MODULE_ALIGN, - MODULES_VADDR + get_module_load_offset(), - MODULES_END, gfp_mask, - PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE, - __builtin_return_address(0)); + MODULES_VADDR + get_module_load_offset(), + MODULES_END, gfp_mask, PAGE_KERNEL, + VM_FLUSH_RESET_PERMS | VM_DEFER_KMEMLEAK, + NUMA_NO_NODE, __builtin_return_address(0)); + if (p && (kasan_alloc_module_shadow(p, size, gfp_mask) < 0)) { vfree(p); return NULL; @@ -285,10 +286,16 @@ int module_finalize(const Elf_Ehdr *hdr, void *pseg = (void *)para->sh_addr; apply_paravirt(pseg, pseg + para->sh_size); } + + its_init_mod(me); + if (retpolines) { void *rseg = (void *)retpolines->sh_addr; apply_retpolines(rseg, rseg + retpolines->sh_size); } + + its_fini_mod(me); + if (returns) { void *rseg = (void *)returns->sh_addr; apply_returns(rseg, rseg + returns->sh_size); @@ -320,4 +327,5 @@ int module_finalize(const Elf_Ehdr *hdr, void module_arch_cleanup(struct module *mod) { alternatives_smp_module_del(mod); + its_free_mod(mod); } diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index ed6cce6c3950..b9a128546970 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -38,8 +38,12 @@ #define CREATE_TRACE_POINTS #include +/* + * An emergency handler can be set in any context including NMI + */ struct nmi_desc { raw_spinlock_t lock; + nmi_handler_t emerg_handler; struct list_head head; }; @@ -121,9 +125,22 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration) static int nmi_handle(unsigned int type, struct pt_regs *regs) { struct nmi_desc *desc = nmi_to_desc(type); + nmi_handler_t ehandler; struct nmiaction *a; int handled=0; + /* + * Call the emergency 
handler, if set + * + * In the case of crash_nmi_callback() emergency handler, it will + * return in the case of the crashing CPU to enable it to complete + * other necessary crashing actions ASAP. Other handlers in the + * linked list won't need to be run. + */ + ehandler = desc->emerg_handler; + if (ehandler) + return ehandler(type, regs); + rcu_read_lock(); /* @@ -213,6 +230,31 @@ void unregister_nmi_handler(unsigned int type, const char *name) } EXPORT_SYMBOL_GPL(unregister_nmi_handler); +/** + * set_emergency_nmi_handler - Set emergency handler + * @type: NMI type + * @handler: the emergency handler to be stored + * + * Set an emergency NMI handler which, if set, will preempt all the other + * handlers in the linked list. If a NULL handler is passed in, it will clear + * it. It is expected that concurrent calls to this function will not happen + * or the system is screwed beyond repair. + */ +void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler) +{ + struct nmi_desc *desc = nmi_to_desc(type); + + if (WARN_ON_ONCE(desc->emerg_handler == handler)) + return; + desc->emerg_handler = handler; + + /* + * Ensure the emergency handler is visible to other CPUs before + * function return + */ + smp_wmb(); +} + static void pci_serr_error(unsigned char reason, struct pt_regs *regs) { diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index acc83738bf5b..24c237c57198 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -87,7 +87,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid); */ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { - memcpy(dst, src, arch_task_struct_size); + /* init_task is not dynamically sized (incomplete FPU state) */ + if (unlikely(src == &init_task)) + memcpy_and_pad(dst, arch_task_struct_size, src, sizeof(init_task), 0); + else + memcpy(dst, src, arch_task_struct_size); + #ifdef CONFIG_VM86 dst->thread.vm86 = NULL; #endif diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 299b970e5f82..d9dbcd1cf75f 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -896,15 +896,11 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) shootdown_callback = callback; atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); - /* Would it be better to replace the trap vector here? */ - if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback, - NMI_FLAG_FIRST, "crash")) - return; /* Return what? */ + /* - * Ensure the new callback function is set before sending - * out the NMI + * Set emergency handler to preempt other handlers. 
*/ - wmb(); + set_emergency_nmi_handler(NMI_LOCAL, crash_nmi_callback); apic_send_IPI_allbutself(NMI_VECTOR); diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c index b32134b093ec..93809a4dc793 100644 --- a/arch/x86/kernel/static_call.c +++ b/arch/x86/kernel/static_call.c @@ -79,7 +79,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, break; case RET: - if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) + if (cpu_wants_rethunk_at(insn)) code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk); else code = &retinsn; @@ -170,6 +170,14 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail) } EXPORT_SYMBOL_GPL(arch_static_call_transform); +noinstr void __static_call_update_early(void *tramp, void *func) +{ + BUG_ON(system_state != SYSTEM_BOOTING); + BUG_ON(static_call_initialized); + __text_gen_insn(tramp, JMP32_INSN_OPCODE, tramp, func, JMP32_INSN_SIZE); + sync_core(); +} + #ifdef CONFIG_RETHUNK /* * This is called by apply_returns() to fix up static call trampolines, diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index cafacb2e58cc..e5ec68ce938a 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -920,7 +920,7 @@ static unsigned long long cyc2ns_suspend; void tsc_save_sched_clock_state(void) { - if (!sched_clock_stable()) + if (!static_branch_likely(&__use_tsc) && !sched_clock_stable()) return; cyc2ns_suspend = sched_clock(); @@ -940,7 +940,7 @@ void tsc_restore_sched_clock_state(void) unsigned long flags; int cpu; - if (!sched_clock_stable()) + if (!static_branch_likely(&__use_tsc) && !sched_clock_stable()) return; local_irq_save(flags); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 78ccb5ec3c0e..35aeec9d2267 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -528,6 +528,16 @@ INIT_PER_CPU(irq_stack_backing_store); "SRSO function pair won't alias"); #endif +#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B) +. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_thunk_rax not in second half of cacheline"); +. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart"); +. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array"); +#endif + +#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B) +. 
= ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline"); +#endif + #endif /* CONFIG_X86_64 */ #ifdef CONFIG_KEXEC_CORE diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 3818c85cf964..43b1df7ec183 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -33,6 +33,26 @@ u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly; EXPORT_SYMBOL_GPL(kvm_cpu_caps); +struct cpuid_xstate_sizes { + u32 eax; + u32 ebx; + u32 ecx; +}; + +static struct cpuid_xstate_sizes xstate_sizes[XFEATURE_MAX] __ro_after_init; + +void __init kvm_init_xstate_sizes(void) +{ + u32 ign; + int i; + + for (i = XFEATURE_YMM; i < ARRAY_SIZE(xstate_sizes); i++) { + struct cpuid_xstate_sizes *xs = &xstate_sizes[i]; + + cpuid_count(0xD, i, &xs->eax, &xs->ebx, &xs->ecx, &ign); + } +} + u32 xstate_required_size(u64 xstate_bv, bool compacted) { int feature_bit = 0; @@ -41,14 +61,15 @@ u32 xstate_required_size(u64 xstate_bv, bool compacted) xstate_bv &= XFEATURE_MASK_EXTEND; while (xstate_bv) { if (xstate_bv & 0x1) { - u32 eax, ebx, ecx, edx, offset; - cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx); + struct cpuid_xstate_sizes *xs = &xstate_sizes[feature_bit]; + u32 offset; + /* ECX[1]: 64B alignment in compacted form */ if (compacted) - offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret; + offset = (xs->ecx & 0x2) ? ALIGN(ret, 64) : ret; else - offset = ebx; - ret = max(ret, offset + eax); + offset = xs->ebx; + ret = max(ret, offset + xs->eax); } xstate_bv >>= 1; diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 18fd2e845989..4be98e1c4cd7 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -32,6 +32,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool exact_only); +void __init kvm_init_xstate_sizes(void); u32 xstate_required_size(u64 xstate_bv, bool compacted); int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 04cca46fed1e..28555bbd52e8 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1915,6 +1915,9 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc) u32 vector; bool all_cpus; + if (!lapic_in_kernel(vcpu)) + return HV_STATUS_INVALID_HYPERCALL_INPUT; + if (hc->code == HVCALL_SEND_IPI) { if (!hc->fast) { if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi, @@ -2518,7 +2521,8 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid, ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; - ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; + if (!vcpu || lapic_in_kernel(vcpu)) + ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; if (evmcs_ver) ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 246374cb3ed3..7f57dce5c828 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2453,19 +2453,26 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; - if (apic->apicv_active) { - /* irr_pending is always true when apicv is activated. */ - apic->irr_pending = true; + /* + * When APICv is enabled, KVM must always search the IRR for a pending + * IRQ, as other vCPUs and devices can set IRR bits even if the vCPU + * isn't running. If APICv is disabled, KVM _should_ search the IRR + * for a pending IRQ. 
But KVM currently doesn't ensure *all* hardware, + * e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching + * the IRR at this time could race with IRQ delivery from hardware that + * still sees APICv as being enabled. + * + * FIXME: Ensure other vCPUs and devices observe the change in APICv + * state prior to updating KVM's metadata caches, so that KVM + * can safely search the IRR and set irr_pending accordingly. + */ + apic->irr_pending = true; + + if (apic->apicv_active) apic->isr_count = 1; - } else { - /* - * Don't clear irr_pending, searching the IRR can race with - * updates from the CPU as APICv is still active from hardware's - * perspective. The flag will be cleared as appropriate when - * KVM injects the interrupt. - */ + else apic->isr_count = count_vectors(apic->regs + APIC_ISR); - } + apic->highest_isr_cache = -1; } EXPORT_SYMBOL_GPL(kvm_apic_update_apicv); diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 13134954e24d..2fa130c4c173 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4245,6 +4245,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu); unsigned long mmu_seq; + kvm_pfn_t orig_pfn; int r; fault->gfn = fault->addr >> PAGE_SHIFT; @@ -4272,6 +4273,8 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault if (r != RET_PF_CONTINUE) return r; + orig_pfn = fault->pfn; + r = RET_PF_RETRY; if (is_tdp_mmu_fault) @@ -4296,7 +4299,7 @@ out_unlock: read_unlock(&vcpu->kvm->mmu_lock); else write_unlock(&vcpu->kvm->mmu_lock); - kvm_release_pfn_clean(fault->pfn); + kvm_release_pfn_clean(orig_pfn); return r; } @@ -5147,7 +5150,7 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0, union kvm_mmu_page_role root_role; /* NPT requires CR0.PG=1. */ - WARN_ON_ONCE(cpu_role.base.direct); + WARN_ON_ONCE(cpu_role.base.direct || !cpu_role.base.guest_mode); root_role = cpu_role.base; root_role.level = kvm_mmu_get_tdp_level(vcpu); diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index 1f4f5e703f13..685560a45bf6 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -790,6 +790,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) { struct guest_walker walker; + kvm_pfn_t orig_pfn; int r; unsigned long mmu_seq; bool is_self_change_mapping; @@ -868,6 +869,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault walker.pte_access &= ~ACC_EXEC_MASK; } + orig_pfn = fault->pfn; + r = RET_PF_RETRY; write_lock(&vcpu->kvm->mmu_lock); @@ -881,7 +884,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault out_unlock: write_unlock(&vcpu->kvm->mmu_lock); - kvm_release_pfn_clean(fault->pfn); + kvm_release_pfn_clean(orig_pfn); return r; } diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 2e08b2a45361..a94735eff2cb 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -194,12 +194,20 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask; /* - * Optimization: for pte sync, if spte was writable the hash - * lookup is unnecessary (and expensive). Write protection - * is responsibility of kvm_mmu_get_page / kvm_mmu_sync_roots. - * Same reasoning can be applied to dirty page accounting. 
+ * When overwriting an existing leaf SPTE, and the old SPTE was + * writable, skip trying to unsync shadow pages as any relevant + * shadow pages must already be unsync, i.e. the hash lookup is + * unnecessary (and expensive). + * + * The same reasoning applies to dirty page/folio accounting; + * KVM will mark the folio dirty using the old SPTE, thus + * there's no need to immediately mark the new SPTE as dirty. + * + * Note, both cases rely on KVM not changing PFNs without first + * zapping the old SPTE, which is guaranteed by both the shadow + * MMU and the TDP MMU. */ - if (is_writable_pte(old_spte)) + if (is_last_spte(old_spte, level) && is_writable_pte(old_spte)) goto out; /* diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index fb125b54ee68..0adbf0677b7c 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -839,7 +839,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) * Allocating new amd_iommu_pi_data, which will get * add to the per-vcpu ir_list. */ - ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT); + ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT); if (!ir) { ret = -ENOMEM; goto out; @@ -915,6 +915,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, { struct kvm_kernel_irq_routing_entry *e; struct kvm_irq_routing_table *irq_rt; + bool enable_remapped_mode = true; int idx, ret = 0; if (!kvm_arch_has_assigned_device(kvm) || @@ -952,6 +953,8 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, kvm_vcpu_apicv_active(&svm->vcpu)) { struct amd_iommu_pi_data pi; + enable_remapped_mode = false; + /* Try to enable guest_mode in IRTE */ pi.base = __sme_set(page_to_phys(svm->avic_backing_page) & AVIC_HPA_MASK); @@ -970,33 +973,6 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, */ if (!ret && pi.is_guest_mode) svm_ir_list_add(svm, &pi); - } else { - /* Use legacy mode in IRTE */ - struct amd_iommu_pi_data pi; - - /** - * Here, pi is used to: - * - Tell IOMMU to use legacy mode for this interrupt. - * - Retrieve ga_tag of prior interrupt remapping data. - */ - pi.prev_ga_tag = 0; - pi.is_guest_mode = false; - ret = irq_set_vcpu_affinity(host_irq, &pi); - - /** - * Check if the posted interrupt was previously - * setup with the guest_mode by checking if the ga_tag - * was cached. If so, we need to clean up the per-vcpu - * ir_list. - */ - if (!ret && pi.prev_ga_tag) { - int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag); - struct kvm_vcpu *vcpu; - - vcpu = kvm_get_vcpu_by_id(kvm, id); - if (vcpu) - svm_ir_list_del(to_svm(vcpu), &pi); - } } if (!ret && svm) { @@ -1012,6 +988,34 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq, } ret = 0; + if (enable_remapped_mode) { + /* Use legacy mode in IRTE */ + struct amd_iommu_pi_data pi; + + /** + * Here, pi is used to: + * - Tell IOMMU to use legacy mode for this interrupt. + * - Retrieve ga_tag of prior interrupt remapping data. + */ + pi.prev_ga_tag = 0; + pi.is_guest_mode = false; + ret = irq_set_vcpu_affinity(host_irq, &pi); + + /** + * Check if the posted interrupt was previously + * setup with the guest_mode by checking if the ga_tag + * was cached. If so, we need to clean up the per-vcpu + * ir_list. 
+ */ + if (!ret && pi.prev_ga_tag) { + int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag); + struct kvm_vcpu *vcpu; + + vcpu = kvm_get_vcpu_by_id(kvm, id); + if (vcpu) + svm_ir_list_del(to_svm(vcpu), &pi); + } + } out: srcu_read_unlock(&kvm->irq_srcu, idx); return ret; diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 974f30d38c79..b889dc6e20a7 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -619,6 +619,11 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, u32 pause_count12; u32 pause_thresh12; + nested_svm_transition_tlb_flush(vcpu); + + /* Enter Guest-Mode */ + enter_guest_mode(vcpu); + /* * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2, * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes. @@ -717,11 +722,6 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, } } - nested_svm_transition_tlb_flush(vcpu); - - /* Enter Guest-Mode */ - enter_guest_mode(vcpu); - /* * Merge guest and host intercepts - must be called with vcpu in * guest-mode to take effect. diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index a96facc05139..920e4a3583ab 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1920,11 +1920,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) svm->asid = sd->next_asid++; } -static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value) +static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value) { - struct vmcb *vmcb = svm->vmcb; + struct vmcb *vmcb = to_svm(vcpu)->vmcb; - if (svm->vcpu.arch.guest_state_protected) + if (vcpu->arch.guest_state_protected) return; if (unlikely(value != vmcb->save.dr6)) { @@ -3039,6 +3039,18 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) __func__, data); break; } + + /* + * AMD changed the architectural behavior of bits 5:2. On CPUs + * without BusLockTrap, bits 5:2 control "external pins", but + * on CPUs that support BusLockDetect, bit 2 enables BusLockTrap + * and bits 5:3 are reserved-to-zero. Sadly, old KVM allowed + * the guest to set bits 5:2 despite not actually virtualizing + * Performance-Monitoring/Breakpoint external pins. Drop bits + * 5:2 for backwards compatibility. + */ + data &= ~GENMASK(5, 2); + if (data & DEBUGCTL_RESERVED_BITS) return 1; @@ -4023,10 +4035,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) * Run with all-zero DR6 unless needed, so that we can get the exact cause * of a #DB. 
*/ - if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) - svm_set_dr6(svm, vcpu->arch.dr6); - else - svm_set_dr6(svm, DR6_ACTIVE_LOW); + if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))) + svm_set_dr6(vcpu, DR6_ACTIVE_LOW); clgi(); kvm_load_guest_xsave_state(vcpu); @@ -4795,6 +4805,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .set_idt = svm_set_idt, .get_gdt = svm_get_gdt, .set_gdt = svm_set_gdt, + .set_dr6 = svm_set_dr6, .set_dr7 = svm_set_dr7, .sync_dirty_debug_regs = svm_sync_dirty_debug_regs, .cache_reg = svm_cache_reg, diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 4cb1425900c6..a7f2faea8858 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -539,7 +539,7 @@ static inline bool is_x2apic_msrpm_offset(u32 offset) /* svm.c */ #define MSR_INVALID 0xffffffffU -#define DEBUGCTL_RESERVED_BITS (~(0x3fULL)) +#define DEBUGCTL_RESERVED_BITS (~(DEBUGCTLMSR_BTF | DEBUGCTLMSR_LBR)) extern bool dump_invalid_vmcb; diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 2283f485a81f..8052f8b7d8e1 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -1126,11 +1126,14 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx = to_vmx(vcpu); /* - * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings - * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a - * full TLB flush from the guest's perspective. This is required even - * if VPID is disabled in the host as KVM may need to synchronize the - * MMU in response to the guest TLB flush. + * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the + * same VPID as the host, and so architecturally, linear and combined + * mappings for VPID=0 must be flushed at VM-Enter and VM-Exit. KVM + * emulates L2 sharing L1's VPID=0 by using vpid01 while running L2, + * and so KVM must also emulate TLB flush of VPID=0, i.e. vpid01. This + * is required if VPID is disabled in KVM, as a TLB flush (there are no + * VPIDs) still occurs from L1's perspective, and KVM may need to + * synchronize the MMU in response to the guest TLB flush. * * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use. * EPT is a special snowflake, as guest-physical mappings aren't @@ -2196,6 +2199,17 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA); + /* + * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the + * same VPID as the host. Emulate this behavior by using vpid01 for L2 + * if VPID is disabled in vmcs12. Note, if VPID is disabled, VM-Enter + * and VM-Exit are architecturally required to flush VPID=0, but *only* + * VPID=0. I.e. using vpid02 would be ok (so long as KVM emulates the + * required flushes), but doing so would cause KVM to over-flush. E.g. + * if L1 runs L2 X with VPID12=1, then runs L2 Y with VPID12 disabled, + * and then runs L2 X again, then KVM can and should retain TLB entries + * for VPID12=1. + */ if (enable_vpid) { if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); @@ -5758,6 +5772,12 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID); + /* + * Always flush the effective vpid02, i.e. never flush the current VPID + * and never explicitly flush vpid01. 
INVVPID targets a VPID, not a + * VMCS, and so whether or not the current vmcs12 has VPID enabled is + * irrelevant (and there may not be a loaded vmcs12). + */ vpid02 = nested_get_vpid02(vcpu); switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c index 1b56c5e5c9fb..0d04382f37af 100644 --- a/arch/x86/kvm/vmx/posted_intr.c +++ b/arch/x86/kvm/vmx/posted_intr.c @@ -272,6 +272,7 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, { struct kvm_kernel_irq_routing_entry *e; struct kvm_irq_routing_table *irq_rt; + bool enable_remapped_mode = true; struct kvm_lapic_irq irq; struct kvm_vcpu *vcpu; struct vcpu_data vcpu_info; @@ -310,21 +311,8 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, kvm_set_msi_irq(kvm, e, &irq); if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) || - !kvm_irq_is_postable(&irq)) { - /* - * Make sure the IRTE is in remapped mode if - * we don't handle it in posted mode. - */ - ret = irq_set_vcpu_affinity(host_irq, NULL); - if (ret < 0) { - printk(KERN_INFO - "failed to back to remapped mode, irq: %u\n", - host_irq); - goto out; - } - + !kvm_irq_is_postable(&irq)) continue; - } vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu)); vcpu_info.vector = irq.vector; @@ -332,11 +320,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi, vcpu_info.vector, vcpu_info.pi_desc_addr, set); - if (set) - ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); - else - ret = irq_set_vcpu_affinity(host_irq, NULL); + if (!set) + continue; + enable_remapped_mode = false; + + ret = irq_set_vcpu_affinity(host_irq, &vcpu_info); if (ret < 0) { printk(KERN_INFO "%s: failed to update PI IRTE\n", __func__); @@ -344,6 +333,9 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq, } } + if (enable_remapped_mode) + ret = irq_set_vcpu_affinity(host_irq, NULL); + ret = 0; out: srcu_read_unlock(&kvm->irq_srcu, idx); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 87abf4eebf8a..6bd25401bc01 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -209,9 +209,11 @@ module_param(ple_window_shrink, uint, 0444); static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX; module_param(ple_window_max, uint, 0444); -/* Default is SYSTEM mode, 1 for host-guest mode */ +/* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */ int __read_mostly pt_mode = PT_MODE_SYSTEM; +#ifdef CONFIG_BROKEN module_param(pt_mode, int, S_IRUGO); +#endif static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); @@ -3098,7 +3100,7 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu) static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu) { - if (is_guest_mode(vcpu)) + if (is_guest_mode(vcpu) && nested_cpu_has_vpid(get_vmcs12(vcpu))) return nested_get_vpid02(vcpu); return to_vmx(vcpu)->vpid; } @@ -5534,6 +5536,12 @@ static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu) set_debugreg(DR6_RESERVED, 6); } +static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val) +{ + lockdep_assert_irqs_disabled(); + set_debugreg(vcpu->arch.dr6, 6); +} + static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) { vmcs_writel(GUEST_DR7, val); @@ -7218,10 +7226,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) vmx->loaded_vmcs->host_state.cr4 = cr4; } - /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. 
*/ - if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) - set_debugreg(vcpu->arch.dr6, 6); - /* When single-stepping over STI and MOV SS, we must clear the * corresponding interruptibility bits in the guest state. Otherwise * vmentry fails as it then expects bit 14 (BS) in pending debug @@ -8166,6 +8170,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .set_idt = vmx_set_idt, .get_gdt = vmx_get_gdt, .set_gdt = vmx_set_gdt, + .set_dr6 = vmx_set_dr6, .set_dr7 = vmx_set_dr7, .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, .cache_reg = vmx_cache_reg, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4e778ba6a5d4..a6dc8f662fa4 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1614,7 +1614,7 @@ static unsigned int num_msr_based_features; ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \ ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \ ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \ - ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO) + ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO) static u64 kvm_get_arch_capabilities(void) { @@ -1653,6 +1653,8 @@ static u64 kvm_get_arch_capabilities(void) data |= ARCH_CAP_MDS_NO; if (!boot_cpu_has_bug(X86_BUG_RFDS)) data |= ARCH_CAP_RFDS_NO; + if (!boot_cpu_has_bug(X86_BUG_ITS)) + data |= ARCH_CAP_ITS_NO; if (!boot_cpu_has(X86_FEATURE_RTM)) { /* @@ -9712,7 +9714,7 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu) { u64 ret = vcpu->run->hypercall.ret; - if (!is_64_bit_mode(vcpu)) + if (!is_64_bit_hypercall(vcpu)) ret = (u32)ret; kvm_rax_write(vcpu, ret); ++vcpu->stat.hypercalls; @@ -10841,6 +10843,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) set_debugreg(vcpu->arch.eff_db[1], 1); set_debugreg(vcpu->arch.eff_db[2], 2); set_debugreg(vcpu->arch.eff_db[3], 3); + /* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. 
*/ + if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) + static_call(kvm_x86_set_dr6)(vcpu, vcpu->arch.dr6); } else if (unlikely(hw_breakpoint_active())) { set_debugreg(0, 7); } @@ -11460,6 +11465,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, if (kvm_mpx_supported()) kvm_load_guest_fpu(vcpu); + kvm_vcpu_srcu_read_lock(vcpu); + r = kvm_apic_accept_events(vcpu); if (r < 0) goto out; @@ -11473,6 +11480,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, mp_state->mp_state = vcpu->arch.mp_state; out: + kvm_vcpu_srcu_read_unlock(vcpu); + if (kvm_mpx_supported()) kvm_put_guest_fpu(vcpu); vcpu_put(vcpu); @@ -13422,7 +13431,8 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, struct kvm_kernel_irq_routing_entry *new) { - if (new->type != KVM_IRQ_ROUTING_MSI) + if (old->type != KVM_IRQ_ROUTING_MSI || + new->type != KVM_IRQ_ROUTING_MSI) return true; return !!memcmp(&old->msi, &new->msi, sizeof(new->msi)); @@ -13820,6 +13830,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit); static int __init kvm_x86_init(void) { + kvm_init_xstate_sizes(); + kvm_mmu_x86_module_init(); mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible(); return 0; diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 7880e2a7ec6a..e42661500094 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -258,6 +258,45 @@ SYM_FUNC_START(entry_untrain_ret) SYM_FUNC_END(entry_untrain_ret) __EXPORT_THUNK(entry_untrain_ret) +#ifdef CONFIG_MITIGATION_ITS + +.macro ITS_THUNK reg + +SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL) + UNWIND_HINT_EMPTY + ANNOTATE_NOENDBR + ANNOTATE_RETPOLINE_SAFE + jmp *%\reg + int3 + .align 32, 0xcc /* fill to the end of the line */ + .skip 32, 0xcc /* skip to the next upper half */ +.endm + +/* ITS mitigation requires thunks be aligned to upper half of cacheline */ +.align 64, 0xcc +.skip 32, 0xcc +SYM_CODE_START(__x86_indirect_its_thunk_array) + +#define GEN(reg) ITS_THUNK reg +#include +#undef GEN + + .align 64, 0xcc +SYM_CODE_END(__x86_indirect_its_thunk_array) + +.align 64, 0xcc +.skip 32, 0xcc +SYM_CODE_START(its_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_CODE_END(its_return_thunk) +EXPORT_SYMBOL(its_return_thunk) + +#endif /* CONFIG_MITIGATION_ITS */ + SYM_CODE_START(__x86_return_thunk) UNWIND_HINT_FUNC ANNOTATE_NOENDBR diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index ed861ef33f80..446bf7fbc325 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -263,28 +263,33 @@ static void __init probe_page_size_mask(void) } /* - * INVLPG may not properly flush Global entries - * on these CPUs when PCIDs are enabled. + * INVLPG may not properly flush Global entries on + * these CPUs. New microcode fixes the issue. 
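The thunk array above goes out of its way to land every thunk in the upper 32 bytes of a 64-byte cacheline (the leading .skip 32 plus the .align/.skip pair inside ITS_THUNK). The flip side of that layout is the address test that decides which half of a line an instruction ends in; below is a minimal, illustrative userspace sketch of that test, assuming the 64/32-byte split used by the alignment directives above (the helper name is made up here, not the kernel's).

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative only: with 64-byte cachelines, bit 5 of the address of an
 * instruction's last byte says which half of the line it sits in.  The
 * thunks above are deliberately placed so this test is never true for them.
 */
static bool its_lower_half_site(uintptr_t last_byte_addr)
{
        return (last_byte_addr & 0x20) == 0;    /* offset 0..31 => lower half */
}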
*/ static const struct x86_cpu_id invlpg_miss_ids[] = { - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 0), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 0), - X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, 0), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, 0), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, 0), - X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, 0), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 0x2e), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 0x42c), + X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, 0x11), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, 0x118), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, 0x4117), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, 0x2e), {} }; static void setup_pcid(void) { + const struct x86_cpu_id *invlpg_miss_match; + if (!IS_ENABLED(CONFIG_X86_64)) return; if (!boot_cpu_has(X86_FEATURE_PCID)) return; - if (x86_match_cpu(invlpg_miss_ids)) { + invlpg_miss_match = x86_match_cpu(invlpg_miss_ids); + + if (invlpg_miss_match && + boot_cpu_data.microcode < invlpg_miss_match->driver_data) { pr_info("Incomplete global flushes, disabling PCID"); setup_clear_cpu_cap(X86_FEATURE_PCID); return; @@ -649,8 +654,13 @@ static void __init memory_map_top_down(unsigned long map_start, */ addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start, map_end); - memblock_phys_free(addr, PMD_SIZE); - real_end = addr + PMD_SIZE; + if (!addr) { + pr_warn("Failed to release memory for alloc_low_pages()"); + real_end = max(map_start, ALIGN_DOWN(map_end, PMD_SIZE)); + } else { + memblock_phys_free(addr, PMD_SIZE); + real_end = addr + PMD_SIZE; + } /* step_size need to be small so pgt_buf from BRK could cover it */ step_size = PMD_SIZE; diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 851711509d38..9d75b89fa257 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -959,9 +959,18 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, ret = __add_pages(nid, start_pfn, nr_pages, params); WARN_ON_ONCE(ret); - /* update max_pfn, max_low_pfn and high_memory */ - update_end_of_memory_vars(start_pfn << PAGE_SHIFT, - nr_pages << PAGE_SHIFT); + /* + * Special case: add_pages() is called by memremap_pages() for adding device + * private pages. Do not bump up max_pfn in the device private path, + * because max_pfn changes affect dma_addressing_limited(). 
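The table change above switches driver_data from 0 to a per-model microcode revision, and setup_pcid() now compares boot_cpu_data.microcode against it, so PCID is only disabled while the fix is actually absent. A condensed, hedged model of that decision (names and types here are illustrative stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

struct invlpg_quirk {
        uint32_t fixed_rev;     /* first microcode revision carrying the fix */
};

/*
 * Keep PCID unless the CPU model is listed in the quirk table *and* the
 * running microcode predates the revision recorded for that model.
 */
static bool keep_pcid(const struct invlpg_quirk *match, uint32_t running_rev)
{
        if (!match)
                return true;            /* model not affected */
        return running_rev >= match->fixed_rev;
}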
+ * + * dma_addressing_limited() returning true when max_pfn is the device's + * addressable memory can force device drivers to use bounce buffers + * and impact their performance negatively: + */ + if (!params->pgmap) + /* update max_pfn, max_low_pfn and high_memory */ + update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); return ret; } diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 6453fbaedb08..aa5234034c50 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -650,7 +650,8 @@ static bool memremap_is_setup_data(resource_size_t phys_addr, paddr_next = data->next; len = data->len; - if ((phys_addr > paddr) && (phys_addr < (paddr + len))) { + if ((phys_addr > paddr) && + (phys_addr < (paddr + sizeof(struct setup_data) + len))) { memunmap(data); return true; } @@ -712,7 +713,8 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr, paddr_next = data->next; len = data->len; - if ((phys_addr > paddr) && (phys_addr < (paddr + len))) { + if ((phys_addr > paddr) && + (phys_addr < (paddr + sizeof(struct setup_data) + len))) { early_memunmap(data, sizeof(*data)); return true; } diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 230f1dee4f09..e0b0ec0f8245 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -109,8 +109,14 @@ void __init kernel_randomize_memory(void) memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) + CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING; - /* Adapt physical memory region size based on available memory */ - if (memory_tb < kaslr_regions[0].size_tb) + /* + * Adapt physical memory region size based on available memory, + * except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the + * device BAR space assuming the direct map space is large enough + * for creating a ZONE_DEVICE mapping in the direct map corresponding + * to the physical BAR address. + */ + if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb)) kaslr_regions[0].size_tb = memory_tb; /* diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index f17609884874..839d8a4a1cd0 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -588,7 +588,7 @@ void __head sme_enable(struct boot_params *bp) out: RIP_REL_REF(sme_me_mask) = me_mask; - physical_mask &= ~me_mask; - cc_vendor = CC_VENDOR_AMD; + RIP_REL_REF(physical_mask) &= ~me_mask; + RIP_REL_REF(cc_vendor) = CC_VENDOR_AMD; cc_set_mask(me_mask); } diff --git a/arch/x86/mm/pat/cpa-test.c b/arch/x86/mm/pat/cpa-test.c index 423b21e80929..55ce39b6b1b0 100644 --- a/arch/x86/mm/pat/cpa-test.c +++ b/arch/x86/mm/pat/cpa-test.c @@ -183,7 +183,7 @@ static int pageattr_test(void) break; case 1: - err = change_page_attr_set(addrs, len[1], PAGE_CPA_TEST, 1); + err = change_page_attr_set(addrs, len[i], PAGE_CPA_TEST, 1); break; case 2: diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index c1e31e9a85d7..4918268d2007 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -385,9 +385,9 @@ static void cond_mitigation(struct task_struct *next) prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec); /* - * Avoid user/user BTB poisoning by flushing the branch predictor - * when switching between processes. This stops one process from - * doing Spectre-v2 attacks on another. + * Avoid user->user BTB/RSB poisoning by flushing them when switching + * between processes. This stops one process from doing Spectre-v2 + * attacks on another. 
* * Both, the conditional and the always IBPB mode use the mm * pointer to avoid the IBPB when switching between tasks of the @@ -617,7 +617,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); - /* Let nmi_uaccess_okay() know that we're changing CR3. */ + /* + * Indicate that CR3 is about to change. nmi_uaccess_okay() + * and others are sensitive to the window where mm_cpumask(), + * CR3 and cpu_tlbstate.loaded_mm are not all in sync. + */ this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); barrier(); } @@ -878,9 +882,51 @@ done: nr_invalidate); } -static bool tlb_is_not_lazy(int cpu, void *data) +static bool should_flush_tlb(int cpu, void *data) { - return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu); + struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu); + struct flush_tlb_info *info = data; + + /* + * Order the 'loaded_mm' and 'is_lazy' against their + * write ordering in switch_mm_irqs_off(). Ensure + * 'is_lazy' is at least as new as 'loaded_mm'. + */ + smp_rmb(); + + /* Lazy TLB will get flushed at the next context switch. */ + if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu)) + return false; + + /* No mm means kernel memory flush. */ + if (!info->mm) + return true; + + /* + * While switching, the remote CPU could have state from + * either the prev or next mm. Assume the worst and flush. + */ + if (loaded_mm == LOADED_MM_SWITCHING) + return true; + + /* The target mm is loaded, and the CPU is not lazy. */ + if (loaded_mm == info->mm) + return true; + + /* In cpumask, but not the loaded mm? Periodically remove by flushing. */ + if (info->trim_cpumask) + return true; + + return false; +} + +static bool should_trim_cpumask(struct mm_struct *mm) +{ + if (time_after(jiffies, READ_ONCE(mm->context.next_trim_cpumask))) { + WRITE_ONCE(mm->context.next_trim_cpumask, jiffies + HZ); + return true; + } + return false; } DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared); @@ -914,7 +960,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask, if (info->freed_tables) on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true); else - on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func, + on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func, (void *)info, 1, cpumask); } @@ -965,6 +1011,7 @@ static struct flush_tlb_info *get_flush_tlb_info(struct mm_struct *mm, info->freed_tables = freed_tables; info->new_tlb_gen = new_tlb_gen; info->initiating_cpu = smp_processor_id(); + info->trim_cpumask = 0; return info; } @@ -1007,6 +1054,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, * flush_tlb_func_local() directly in this case. 
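should_trim_cpumask() above bounds how often the new trim pass runs: at most roughly once per second per mm, tracked through a next-deadline kept in jiffies (READ_ONCE/WRITE_ONCE because concurrent flushers can race on it). A stripped-down, single-threaded model of the same rate limit, using wall-clock seconds instead of jiffies:

#include <stdbool.h>
#include <time.h>

/*
 * Simplified model of should_trim_cpumask(): allow the expensive cpumask
 * trim only once the per-mm deadline has passed, then push the deadline one
 * period into the future.  The kernel version uses jiffies + HZ and the
 * READ_ONCE/WRITE_ONCE annotations; both are dropped here for brevity.
 */
static bool should_trim(time_t *next_deadline)
{
        time_t now = time(NULL);

        if (now >= *next_deadline) {
                *next_deadline = now + 1;       /* ~one trim per second */
                return true;
        }
        return false;
}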
*/ if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) { + info->trim_cpumask = should_trim_cpumask(mm); flush_tlb_multi(mm_cpumask(mm), info); } else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) { lockdep_assert_irqs_enabled(); diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 92db785a0a8e..f3068bb53c4d 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -36,6 +36,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) +#define EMIT5(b1, b2, b3, b4, b5) \ + do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0) #define EMIT1_off32(b1, off) \ do { EMIT1(b1); EMIT(off, 4); } while (0) @@ -462,7 +464,11 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip) { u8 *prog = *pprog; - if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { + if (IS_ENABLED(CONFIG_MITIGATION_ITS) && + cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) { + OPTIMIZER_HIDE_VAR(reg); + emit_jump(&prog, its_static_thunk(reg), ip); + } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { EMIT_LFENCE(); EMIT2(0xFF, 0xE0 + reg); } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { @@ -481,7 +487,7 @@ static void emit_return(u8 **pprog, u8 *ip) { u8 *prog = *pprog; - if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { + if (cpu_wants_rethunk()) { emit_jump(&prog, x86_return_thunk, ip); } else { EMIT1(0xC3); /* ret */ @@ -947,6 +953,47 @@ static void emit_nops(u8 **pprog, int len) #define RESTORE_TAIL_CALL_CNT(stack) \ EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8) +static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip, + struct bpf_prog *bpf_prog) +{ + u8 *prog = *pprog; + u8 *func; + + if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) { + /* The clearing sequence clobbers eax and ecx. */ + EMIT1(0x50); /* push rax */ + EMIT1(0x51); /* push rcx */ + ip += 2; + + func = (u8 *)clear_bhb_loop; + + if (emit_call(&prog, func, ip)) + return -EINVAL; + EMIT1(0x59); /* pop rcx */ + EMIT1(0x58); /* pop rax */ + } + /* Insert IBHF instruction */ + if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) && + cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) || + cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) { + /* + * Add an Indirect Branch History Fence (IBHF). IBHF acts as a + * fence preventing branch history from before the fence from + * affecting indirect branches after the fence. This is + * specifically used in cBPF jitted code to prevent Intra-mode + * BHI attacks. The IBHF instruction is designed to be a NOP on + * hardware that doesn't need or support it. The REP and REX.W + * prefixes are required by the microcode, and they also ensure + * that the NOP is unlikely to be used in existing code. + * + * IBHF is not a valid instruction in 32-bit mode. 
+ */ + EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */ + } + *pprog = prog; + return 0; +} + static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, int oldproglen, struct jit_context *ctx, bool jmp_padding) { @@ -1760,6 +1807,15 @@ emit_jmp: seen_exit = true; /* Update cleanup_addr */ ctx->cleanup_addr = proglen; + + if (bpf_prog_was_classic(bpf_prog) && + !capable(CAP_SYS_ADMIN)) { + u8 *ip = image + addrs[i - 1]; + + if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog)) + return -EINVAL; + } + pop_callee_regs(&prog, callee_regs_used); EMIT1(0xC9); /* leave */ emit_return(&prog, image + addrs[i - 1] + (prog - temp)); diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 2f82480fd430..4421560a5e82 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -248,6 +248,125 @@ void __init pci_acpi_crs_quirks(void) printk(KERN_INFO "PCI: Please notify linux-pci@vger.kernel.org so future kernels can this automatically\n"); } +/* + * Check if pdev is part of a PCIe switch that is directly below the + * specified bridge. + */ +static bool pcie_switch_directly_under(struct pci_dev *bridge, + struct pci_dev *pdev) +{ + struct pci_dev *parent = pci_upstream_bridge(pdev); + + /* If the device doesn't have a parent, it's not under anything */ + if (!parent) + return false; + + /* + * If the device has a PCIe type, check if it is below the + * corresponding PCIe switch components (if applicable). Then check + * if its upstream port is directly beneath the specified bridge. + */ + switch (pci_pcie_type(pdev)) { + case PCI_EXP_TYPE_UPSTREAM: + return parent == bridge; + + case PCI_EXP_TYPE_DOWNSTREAM: + if (pci_pcie_type(parent) != PCI_EXP_TYPE_UPSTREAM) + return false; + parent = pci_upstream_bridge(parent); + return parent == bridge; + + case PCI_EXP_TYPE_ENDPOINT: + if (pci_pcie_type(parent) != PCI_EXP_TYPE_DOWNSTREAM) + return false; + parent = pci_upstream_bridge(parent); + if (!parent || pci_pcie_type(parent) != PCI_EXP_TYPE_UPSTREAM) + return false; + parent = pci_upstream_bridge(parent); + return parent == bridge; + } + + return false; +} + +static bool pcie_has_usb4_host_interface(struct pci_dev *pdev) +{ + struct fwnode_handle *fwnode; + + /* + * For USB4, the tunneled PCIe Root or Downstream Ports are marked + * with the "usb4-host-interface" ACPI property, so we look for + * that first. This should cover most cases. + */ + fwnode = fwnode_find_reference(dev_fwnode(&pdev->dev), + "usb4-host-interface", 0); + if (!IS_ERR(fwnode)) { + fwnode_handle_put(fwnode); + return true; + } + + /* + * Any integrated Thunderbolt 3/4 PCIe Root Ports from Intel + * before Alder Lake do not have the "usb4-host-interface" + * property so we use their PCI IDs instead. All these are + * tunneled. This list is not expected to grow. 
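The EMIT5() above writes the 5-byte IBHF encoding, but only under the two conditions spelled out in emit_spectre_bhb_barrier(): the software BHB-clearing loop is in use while running under a hypervisor, or the hardware clears BHB itself. Restated as a standalone predicate (the helper is illustrative; the flag names mirror the cpu_feature_enabled() checks above):

#include <stdbool.h>

/*
 * Illustrative restatement of the condition guarding the IBHF emission:
 * either the CLEAR_BHB_LOOP sequence is used under a hypervisor, or the
 * CPU advertises hardware BHB clearing (CLEAR_BHB_HW).
 */
static bool want_ibhf(bool clear_bhb_loop, bool hypervisor, bool clear_bhb_hw)
{
        return (clear_bhb_loop && hypervisor) || clear_bhb_hw;
}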
+ */ + if (pdev->vendor == PCI_VENDOR_ID_INTEL) { + switch (pdev->device) { + /* Ice Lake Thunderbolt 3 PCIe Root Ports */ + case 0x8a1d: + case 0x8a1f: + case 0x8a21: + case 0x8a23: + /* Tiger Lake-LP Thunderbolt 4 PCIe Root Ports */ + case 0x9a23: + case 0x9a25: + case 0x9a27: + case 0x9a29: + /* Tiger Lake-H Thunderbolt 4 PCIe Root Ports */ + case 0x9a2b: + case 0x9a2d: + case 0x9a2f: + case 0x9a31: + return true; + } + } + + return false; +} + +bool arch_pci_dev_is_removable(struct pci_dev *pdev) +{ + struct pci_dev *parent, *root; + + /* pdev without a parent or Root Port is never tunneled */ + parent = pci_upstream_bridge(pdev); + if (!parent) + return false; + root = pcie_find_root_port(pdev); + if (!root) + return false; + + /* Internal PCIe devices are not tunneled */ + if (!root->external_facing) + return false; + + /* Anything directly behind a "usb4-host-interface" is tunneled */ + if (pcie_has_usb4_host_interface(parent)) + return true; + + /* + * Check if this is a discrete Thunderbolt/USB4 controller that is + * directly behind the non-USB4 PCIe Root Port marked as + * "ExternalFacingPort". Those are not behind a PCIe tunnel. + */ + if (pcie_switch_directly_under(root, pdev)) + return false; + + /* PCIe devices after the discrete chip are tunneled */ + return true; +} + #ifdef CONFIG_PCI_MMCONFIG static int check_segment(u16 seg, struct device *dev, char *estr) { diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S index 7fe564eaf228..9e6a27018d0f 100644 --- a/arch/x86/platform/pvh/head.S +++ b/arch/x86/platform/pvh/head.S @@ -100,7 +100,12 @@ SYM_CODE_START_LOCAL(pvh_start_xen) xor %edx, %edx wrmsr - call xen_prepare_pvh + /* Call xen_prepare_pvh() via the kernel virtual mapping */ + leaq xen_prepare_pvh(%rip), %rax + subq phys_base(%rip), %rax + addq $__START_KERNEL_map, %rax + ANNOTATE_RETPOLINE_SAFE + call *%rax /* startup_64 expects boot_params in %rsi. 
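The pvh_start_xen hunk above stops calling xen_prepare_pvh() directly and instead computes its address in the kernel's virtual mapping before the indirect call; the three instructions operating on %rax amount to a physical-to-virtual translation. A hedged C model of that arithmetic (a model only; the boot path itself of course stays in assembly):

#include <stdint.h>

/*
 * Models: leaq sym(%rip), %rax; subq phys_base(%rip), %rax;
 *         addq $__START_KERNEL_map, %rax
 */
static uintptr_t kernel_virt_of(uintptr_t sym_runtime_addr,
                                uintptr_t phys_base,
                                uintptr_t start_kernel_map)
{
        return sym_runtime_addr - phys_base + start_kernel_map;
}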
*/ mov $_pa(pvh_bootparams), %rsi diff --git a/arch/x86/um/os-Linux/mcontext.c b/arch/x86/um/os-Linux/mcontext.c index 49c3744cac37..81b9d1f9f4e6 100644 --- a/arch/x86/um/os-Linux/mcontext.c +++ b/arch/x86/um/os-Linux/mcontext.c @@ -26,7 +26,6 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc) COPY(RIP); COPY2(EFLAGS, EFL); COPY2(CS, CSGSFS); - regs->gp[CS / sizeof(unsigned long)] &= 0xffff; - regs->gp[CS / sizeof(unsigned long)] |= 3; + regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48; #endif } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 3c61bb98c10e..31f4a4dd8c62 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -5,6 +5,7 @@ #endif #include #include +#include #include #include #include @@ -25,7 +26,8 @@ #include "smp.h" #include "pmu.h" -EXPORT_SYMBOL_GPL(hypercall_page); +DEFINE_STATIC_CALL(xen_hypercall, xen_hypercall_hvm); +EXPORT_STATIC_CALL_TRAMP(xen_hypercall); /* * Pointer to the xen_vcpu_info structure or @@ -72,6 +74,67 @@ EXPORT_SYMBOL(xen_start_flags); */ struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info; +static __ref void xen_get_vendor(void) +{ + init_cpu_devs(); + cpu_detect(&boot_cpu_data); + get_cpu_vendor(&boot_cpu_data); +} + +void xen_hypercall_setfunc(void) +{ + if (static_call_query(xen_hypercall) != xen_hypercall_hvm) + return; + + if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) + static_call_update(xen_hypercall, xen_hypercall_amd); + else + static_call_update(xen_hypercall, xen_hypercall_intel); +} + +/* + * Evaluate processor vendor in order to select the correct hypercall + * function for HVM/PVH guests. + * Might be called very early in boot before vendor has been set by + * early_cpu_init(). + */ +noinstr void *__xen_hypercall_setfunc(void) +{ + void (*func)(void); + + /* + * Xen is supported only on CPUs with CPUID, so testing for + * X86_FEATURE_CPUID is a test for early_cpu_init() having been + * run. + * + * Note that __xen_hypercall_setfunc() is noinstr only due to a nasty + * dependency chain: it is being called via the xen_hypercall static + * call when running as a PVH or HVM guest. Hypercalls need to be + * noinstr due to PV guests using hypercalls in noinstr code. So we + * can safely tag the function body as "instrumentation ok", since + * the PV guest requirement is not of interest here (xen_get_vendor() + * calls noinstr functions, and static_call_update_early() might do + * so, too). + */ + instrumentation_begin(); + + if (!boot_cpu_has(X86_FEATURE_CPUID)) + xen_get_vendor(); + + if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) + func = xen_hypercall_amd; + else + func = xen_hypercall_intel; + + static_call_update_early(xen_hypercall, func); + + instrumentation_end(); + + return func; +} + static int xen_cpu_up_online(unsigned int cpu) { xen_init_lock_cpu(cpu); diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index c66807dd0270..3a453207f3c7 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -108,15 +108,8 @@ static void __init init_hvm_pv_info(void) /* PVH set up hypercall page in xen_prepare_pvh(). 
*/ if (xen_pvh_domain()) pv_info.name = "Xen PVH"; - else { - u64 pfn; - uint32_t msr; - + else pv_info.name = "Xen HVM"; - msr = cpuid_ebx(base + 2); - pfn = __pa(hypercall_page); - wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32)); - } xen_setup_features(); @@ -299,6 +292,10 @@ static uint32_t __init xen_platform_hvm(void) if (xen_pv_domain()) return 0; + /* Set correct hypercall function. */ + if (xen_domain) + xen_hypercall_setfunc(); + if (xen_pvh_domain() && nopv) { /* Guest booting via the Xen-PVH boot entry goes here */ pr_info("\"nopv\" parameter is ignored in PVH guest\n"); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index 9280e15de3af..ee8f452cc58b 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1248,6 +1248,9 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si) xen_domain_type = XEN_PV_DOMAIN; xen_start_flags = xen_start_info->flags; + /* Interrupts are guaranteed to be off initially. */ + early_boot_irqs_disabled = true; + static_call_update_early(xen_hypercall, xen_hypercall_pv); xen_setup_features(); @@ -1340,7 +1343,6 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si) WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv)); local_irq_disable(); - early_boot_irqs_disabled = true; xen_raw_console_write("mapping kernel into physical memory\n"); xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c index ada3868c02c2..00ee9399fd0c 100644 --- a/arch/x86/xen/enlighten_pvh.c +++ b/arch/x86/xen/enlighten_pvh.c @@ -27,17 +27,10 @@ EXPORT_SYMBOL_GPL(xen_pvh); void __init xen_pvh_init(struct boot_params *boot_params) { - u32 msr; - u64 pfn; - xen_pvh = 1; xen_domain_type = XEN_HVM_DOMAIN; xen_start_flags = pvh_start_info.flags; - msr = cpuid_ebx(xen_cpuid_base() + 2); - pfn = __pa(hypercall_page); - wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32)); - if (xen_initial_domain()) x86_init.oem.arch_setup = xen_add_preferred_consoles; x86_init.oem.banner = xen_banner; diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index ee29fb558f2e..7dabdb999565 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -97,6 +97,51 @@ static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss; */ static DEFINE_SPINLOCK(xen_reservation_lock); +/* Protected by xen_reservation_lock. 
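The hypercall-page removal above funnels into one runtime decision, made by __xen_hypercall_setfunc() and cached in the xen_hypercall static call: AMD and Hygon guests get xen_hypercall_amd, everything else xen_hypercall_intel (the amd variant executes VMMCALL and the intel one VMCALL, as the xen-head.S hunk further down shows). A self-contained sketch of that vendor dispatch; the enum and helper here are illustrative stand-ins for the kernel's vendor constants:

/* Illustrative only: which instruction the HVM/PVH hypercall stub selects. */
enum guest_vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_HYGON, VENDOR_OTHER };

static const char *xen_hypercall_insn(enum guest_vendor v)
{
        if (v == VENDOR_AMD || v == VENDOR_HYGON)
                return "vmmcall";       /* xen_hypercall_amd */
        return "vmcall";                /* xen_hypercall_intel */
}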
*/ +#define MIN_CONTIG_ORDER 9 /* 2MB */ +static unsigned int discontig_frames_order = MIN_CONTIG_ORDER; +static unsigned long discontig_frames_early[1UL << MIN_CONTIG_ORDER] __initdata; +static unsigned long *discontig_frames __refdata = discontig_frames_early; +static bool discontig_frames_dyn; + +static int alloc_discontig_frames(unsigned int order) +{ + unsigned long *new_array, *old_array; + unsigned int old_order; + unsigned long flags; + + BUG_ON(order < MIN_CONTIG_ORDER); + BUILD_BUG_ON(sizeof(discontig_frames_early) != PAGE_SIZE); + + new_array = (unsigned long *)__get_free_pages(GFP_KERNEL, + order - MIN_CONTIG_ORDER); + if (!new_array) + return -ENOMEM; + + spin_lock_irqsave(&xen_reservation_lock, flags); + + old_order = discontig_frames_order; + + if (order > discontig_frames_order || !discontig_frames_dyn) { + if (!discontig_frames_dyn) + old_array = NULL; + else + old_array = discontig_frames; + + discontig_frames = new_array; + discontig_frames_order = order; + discontig_frames_dyn = true; + } else { + old_array = new_array; + } + + spin_unlock_irqrestore(&xen_reservation_lock, flags); + + free_pages((unsigned long)old_array, old_order - MIN_CONTIG_ORDER); + + return 0; +} + /* * Note about cr3 (pagetable base) values: * @@ -766,6 +811,7 @@ void xen_mm_pin_all(void) { struct page *page; + spin_lock(&init_mm.page_table_lock); spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { @@ -776,6 +822,7 @@ void xen_mm_pin_all(void) } spin_unlock(&pgd_lock); + spin_unlock(&init_mm.page_table_lock); } static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page, @@ -797,6 +844,9 @@ static void __init xen_after_bootmem(void) SetPagePinned(virt_to_page(level3_user_vsyscall)); #endif xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); + + if (alloc_discontig_frames(MIN_CONTIG_ORDER)) + BUG(); } static void xen_unpin_page(struct mm_struct *mm, struct page *page, @@ -872,6 +922,7 @@ void xen_mm_unpin_all(void) { struct page *page; + spin_lock(&init_mm.page_table_lock); spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { @@ -883,6 +934,7 @@ void xen_mm_unpin_all(void) } spin_unlock(&pgd_lock); + spin_unlock(&init_mm.page_table_lock); } static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) @@ -2177,10 +2229,6 @@ void __init xen_init_mmu_ops(void) memset(dummy_mapping, 0xff, PAGE_SIZE); } -/* Protected by xen_reservation_lock. */ -#define MAX_CONTIG_ORDER 9 /* 2MB */ -static unsigned long discontig_frames[1< discontig_frames_order)) { + if (!discontig_frames_dyn) + return -ENOMEM; - if (unlikely(order > MAX_CONTIG_ORDER)) - return -ENOMEM; + if (alloc_discontig_frames(order)) + return -ENOMEM; + } memset((void *) vstart, 0, PAGE_SIZE << order); spin_lock_irqsave(&xen_reservation_lock, flags); + in_frames = discontig_frames; + /* 1. Zap current PTEs, remembering MFNs. 
*/ xen_zap_pfn_range(vstart, order, in_frames, NULL); @@ -2338,12 +2387,12 @@ int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) { - unsigned long *out_frames = discontig_frames, in_frame; + unsigned long *out_frames, in_frame; unsigned long flags; int success; unsigned long vstart; - if (unlikely(order > MAX_CONTIG_ORDER)) + if (unlikely(order > discontig_frames_order)) return; vstart = (unsigned long)phys_to_virt(pstart); @@ -2351,6 +2400,8 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) spin_lock_irqsave(&xen_reservation_lock, flags); + out_frames = discontig_frames; + /* 1. Find start MFN of contiguous extent. */ in_frame = virt_to_mfn(vstart); diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index dec5e03e7a2c..a9b878fd6ee0 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -20,9 +20,32 @@ #include #include +#include #include <../entry/calling.h> .pushsection .noinstr.text, "ax" +/* + * PV hypercall interface to the hypervisor. + * + * Called via inline asm(), so better preserve %rcx and %r11. + * + * Input: + * %eax: hypercall number + * %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall + * Output: %rax + */ +SYM_FUNC_START(xen_hypercall_pv) + ANNOTATE_NOENDBR + push %rcx + push %r11 + UNWIND_HINT_SAVE + syscall + UNWIND_HINT_RESTORE + pop %r11 + pop %rcx + RET +SYM_FUNC_END(xen_hypercall_pv) + /* * Disabling events is simply a matter of making the event mask * non-zero. @@ -176,7 +199,6 @@ SYM_CODE_START(xen_early_idt_handler_array) SYM_CODE_END(xen_early_idt_handler_array) __FINIT -hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 /* * Xen64 iret frame: * @@ -186,17 +208,28 @@ hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 * cs * rip <-- standard iret frame * - * flags + * flags <-- xen_iret must push from here on * - * rcx } - * r11 }<-- pushed by hypercall page - * rsp->rax } + * rcx + * r11 + * rsp->rax */ +.macro xen_hypercall_iret + pushq $0 /* Flags */ + push %rcx + push %r11 + push %rax + mov $__HYPERVISOR_iret, %eax + syscall /* Do the IRET. */ +#ifdef CONFIG_SLS + int3 +#endif +.endm + SYM_CODE_START(xen_iret) UNWIND_HINT_EMPTY ANNOTATE_NOENDBR - pushq $0 - jmp hypercall_iret + xen_hypercall_iret SYM_CODE_END(xen_iret) /* @@ -301,8 +334,7 @@ SYM_CODE_START(xen_entry_SYSENTER_compat) ENDBR lea 16(%rsp), %rsp /* strip %rcx, %r11 */ mov $-ENOSYS, %rax - pushq $0 - jmp hypercall_iret + xen_hypercall_iret SYM_CODE_END(xen_entry_SYSENTER_compat) SYM_CODE_END(xen_entry_SYSCALL_compat) diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S index ffaa62167f6e..31d9e56ad6c6 100644 --- a/arch/x86/xen/xen-head.S +++ b/arch/x86/xen/xen-head.S @@ -6,9 +6,11 @@ #include #include +#include #include #include +#include #include #include #include @@ -20,28 +22,6 @@ #include #include -.pushsection .noinstr.text, "ax" - .balign PAGE_SIZE -SYM_CODE_START(hypercall_page) - .rept (PAGE_SIZE / 32) - UNWIND_HINT_FUNC - ANNOTATE_NOENDBR - ANNOTATE_UNRET_SAFE - ret - /* - * Xen will write the hypercall page, and sort out ENDBR. 
- */ - .skip 31, 0xcc - .endr - -#define HYPERCALL(n) \ - .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \ - .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32 -#include -#undef HYPERCALL -SYM_CODE_END(hypercall_page) -.popsection - #ifdef CONFIG_XEN_PV __INIT SYM_CODE_START(startup_xen) @@ -80,6 +60,88 @@ SYM_CODE_END(asm_cpu_bringup_and_idle) #endif #endif + .pushsection .noinstr.text, "ax" +/* + * Xen hypercall interface to the hypervisor. + * + * Input: + * %eax: hypercall number + * 32-bit: + * %ebx, %ecx, %edx, %esi, %edi: args 1..5 for the hypercall + * 64-bit: + * %rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall + * Output: %[er]ax + */ +SYM_FUNC_START(xen_hypercall_hvm) + ENDBR + FRAME_BEGIN + /* Save all relevant registers (caller save and arguments). */ +#ifdef CONFIG_X86_32 + push %eax + push %ebx + push %ecx + push %edx + push %esi + push %edi +#else + push %rax + push %rcx + push %rdx + push %rdi + push %rsi + push %r11 + push %r10 + push %r9 + push %r8 +#ifdef CONFIG_FRAME_POINTER + pushq $0 /* Dummy push for stack alignment. */ +#endif +#endif + /* Set the vendor specific function. */ + call __xen_hypercall_setfunc + /* Set ZF = 1 if AMD, Restore saved registers. */ +#ifdef CONFIG_X86_32 + lea xen_hypercall_amd, %ebx + cmp %eax, %ebx + pop %edi + pop %esi + pop %edx + pop %ecx + pop %ebx + pop %eax +#else + lea xen_hypercall_amd(%rip), %rcx + cmp %rax, %rcx +#ifdef CONFIG_FRAME_POINTER + pop %rax /* Dummy pop. */ +#endif + pop %r8 + pop %r9 + pop %r10 + pop %r11 + pop %rsi + pop %rdi + pop %rdx + pop %rcx + pop %rax +#endif + FRAME_END + /* Use correct hypercall function. */ + jz xen_hypercall_amd + jmp xen_hypercall_intel +SYM_FUNC_END(xen_hypercall_hvm) + +SYM_FUNC_START(xen_hypercall_amd) + vmmcall + RET +SYM_FUNC_END(xen_hypercall_amd) + +SYM_FUNC_START(xen_hypercall_intel) + vmcall + RET +SYM_FUNC_END(xen_hypercall_intel) + .popsection + ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") @@ -93,7 +155,6 @@ SYM_CODE_END(asm_cpu_bringup_and_idle) #ifdef CONFIG_XEN_PV ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen) #endif - ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page) ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .ascii "!writable_page_tables|pae_pgdir_above_4gb") ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES, diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index b2b2f4315b78..3a939418a2f2 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -162,4 +162,13 @@ void xen_hvm_post_suspend(int suspend_cancelled); static inline void xen_hvm_post_suspend(int suspend_cancelled) {} #endif +#ifdef CONFIG_XEN_PV +void xen_hypercall_pv(void); +#endif +void xen_hypercall_hvm(void); +void xen_hypercall_amd(void); +void xen_hypercall_intel(void); +void xen_hypercall_setfunc(void); +void *__xen_hypercall_setfunc(void); + #endif /* XEN_OPS_H */ diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c index 60b4299bec8e..c202e2527d05 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c @@ -704,6 +704,46 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_put_queue(bfqq); } +static void bfq_sync_bfqq_move(struct bfq_data *bfqd, + struct bfq_queue *sync_bfqq, + struct bfq_io_cq *bic, + struct bfq_group *bfqg, + unsigned int act_idx) +{ + struct bfq_queue *bfqq; + + if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) { + /* We are the only user of this bfqq, just move it */ + if 
(sync_bfqq->entity.sched_data != &bfqg->sched_data) + bfq_bfqq_move(bfqd, sync_bfqq, bfqg); + return; + } + + /* + * The queue was merged to a different queue. Check + * that the merge chain still belongs to the same + * cgroup. + */ + for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq) + if (bfqq->entity.sched_data != &bfqg->sched_data) + break; + if (bfqq) { + /* + * Some queue changed cgroup so the merge is not valid + * anymore. We cannot easily just cancel the merge (by + * clearing new_bfqq) as there may be other processes + * using this queue and holding refs to all queues + * below sync_bfqq->new_bfqq. Similarly if the merge + * already happened, we need to detach from bfqq now + * so that we cannot merge bio to a request from the + * old cgroup. + */ + bfq_put_cooperator(sync_bfqq); + bic_set_bfqq(bic, NULL, true, act_idx); + bfq_release_process_ref(bfqd, sync_bfqq); + } +} + /** * __bfq_bic_change_cgroup - move @bic to @bfqg. * @bfqd: the queue descriptor. @@ -714,60 +754,25 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, * sure that the reference to cgroup is valid across the call (see * comments in bfq_bic_update_cgroup on this issue) */ -static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd, +static void __bfq_bic_change_cgroup(struct bfq_data *bfqd, struct bfq_io_cq *bic, struct bfq_group *bfqg) { - struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false); - struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true); - struct bfq_entity *entity; + unsigned int act_idx; - if (async_bfqq) { - entity = &async_bfqq->entity; + for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) { + struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx); + struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx); - if (entity->sched_data != &bfqg->sched_data) { - bic_set_bfqq(bic, NULL, false); + if (async_bfqq && + async_bfqq->entity.sched_data != &bfqg->sched_data) { + bic_set_bfqq(bic, NULL, false, act_idx); bfq_release_process_ref(bfqd, async_bfqq); } + + if (sync_bfqq) + bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx); } - - if (sync_bfqq) { - if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) { - /* We are the only user of this bfqq, just move it */ - if (sync_bfqq->entity.sched_data != &bfqg->sched_data) - bfq_bfqq_move(bfqd, sync_bfqq, bfqg); - } else { - struct bfq_queue *bfqq; - - /* - * The queue was merged to a different queue. Check - * that the merge chain still belongs to the same - * cgroup. - */ - for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq) - if (bfqq->entity.sched_data != - &bfqg->sched_data) - break; - if (bfqq) { - /* - * Some queue changed cgroup so the merge is - * not valid anymore. We cannot easily just - * cancel the merge (by clearing new_bfqq) as - * there may be other processes using this - * queue and holding refs to all queues below - * sync_bfqq->new_bfqq. Similarly if the merge - * already happened, we need to detach from - * bfqq now so that we cannot merge bio to a - * request from the old cgroup. 
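bfq_sync_bfqq_move() above hinges on one walk over the new_bfqq merge chain to decide whether every queue in the chain still belongs to the target cgroup's scheduler data. A self-contained sketch of that walk (struct and names are stand-ins, not the bfq types):

#include <stdbool.h>
#include <stddef.h>

struct q {                      /* stand-in for struct bfq_queue */
        struct q *new_bfqq;     /* next queue in the merge chain */
        void *sched_data;       /* identifies the owning group's scheduler */
};

/* Walk the merge chain; true only if every queue still uses 'sched_data'. */
static bool chain_in_same_group(struct q *head, void *sched_data)
{
        for (struct q *it = head; it; it = it->new_bfqq)
                if (it->sched_data != sched_data)
                        return false;
        return true;
}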
- */ - bfq_put_cooperator(sync_bfqq); - bic_set_bfqq(bic, NULL, true); - bfq_release_process_ref(bfqd, sync_bfqq); - } - } - } - - return bfqg; } void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c index 8e797782cfe3..d1c62a4e8c35 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c @@ -377,16 +377,23 @@ static const unsigned long bfq_late_stable_merging = 600; #define RQ_BIC(rq) ((struct bfq_io_cq *)((rq)->elv.priv[0])) #define RQ_BFQQ(rq) ((rq)->elv.priv[1]) -struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync) +struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync, + unsigned int actuator_idx) { - return bic->bfqq[is_sync]; + if (is_sync) + return bic->bfqq[1][actuator_idx]; + + return bic->bfqq[0][actuator_idx]; } static void bfq_put_stable_ref(struct bfq_queue *bfqq); -void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync) +void bic_set_bfqq(struct bfq_io_cq *bic, + struct bfq_queue *bfqq, + bool is_sync, + unsigned int actuator_idx) { - struct bfq_queue *old_bfqq = bic->bfqq[is_sync]; + struct bfq_queue *old_bfqq = bic->bfqq[is_sync][actuator_idx]; /* Clear bic pointer if bfqq is detached from this bic */ if (old_bfqq && old_bfqq->bic == bic) @@ -405,7 +412,10 @@ void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync) * we cancel the stable merge if * bic->stable_merge_bfqq == bfqq. */ - bic->bfqq[is_sync] = bfqq; + if (is_sync) + bic->bfqq[1][actuator_idx] = bfqq; + else + bic->bfqq[0][actuator_idx] = bfqq; if (bfqq && bic->stable_merge_bfqq == bfqq) { /* @@ -571,23 +581,31 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd, #define BFQ_LIMIT_INLINE_DEPTH 16 #ifdef CONFIG_BFQ_GROUP_IOSCHED -static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit) +static bool bfqq_request_over_limit(struct bfq_data *bfqd, + struct bfq_io_cq *bic, blk_opf_t opf, + unsigned int act_idx, int limit) { - struct bfq_data *bfqd = bfqq->bfqd; - struct bfq_entity *entity = &bfqq->entity; struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH]; struct bfq_entity **entities = inline_entities; - int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH; - int class_idx = bfqq->ioprio_class - 1; + int alloc_depth = BFQ_LIMIT_INLINE_DEPTH; struct bfq_sched_data *sched_data; + struct bfq_entity *entity; + struct bfq_queue *bfqq; unsigned long wsum; bool ret = false; - - if (!entity->on_st_or_in_serv) - return false; + int depth; + int level; retry: spin_lock_irq(&bfqd->lock); + bfqq = bic_to_bfqq(bic, op_is_sync(opf), act_idx); + if (!bfqq) + goto out; + + entity = &bfqq->entity; + if (!entity->on_st_or_in_serv) + goto out; + /* +1 for bfqq entity, root cgroup not included */ depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1; if (depth > alloc_depth) { @@ -632,7 +650,7 @@ retry: * class. 
*/ wsum = 0; - for (i = 0; i <= class_idx; i++) { + for (i = 0; i <= bfqq->ioprio_class - 1; i++) { wsum = wsum * IOPRIO_BE_NR + sched_data->service_tree[i].wsum; } @@ -655,7 +673,9 @@ out: return ret; } #else -static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit) +static bool bfqq_request_over_limit(struct bfq_data *bfqd, + struct bfq_io_cq *bic, blk_opf_t opf, + unsigned int act_idx, int limit) { return false; } @@ -680,9 +700,9 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data) { struct bfq_data *bfqd = data->q->elevator->elevator_data; struct bfq_io_cq *bic = bfq_bic_lookup(data->q); - struct bfq_queue *bfqq = bic ? bic_to_bfqq(bic, op_is_sync(opf)) : NULL; int depth; unsigned limit = data->q->nr_requests; + unsigned int act_idx; /* Sync reads have full depth available */ if (op_is_sync(opf) && !op_is_write(opf)) { @@ -692,14 +712,22 @@ static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data) limit = (limit * depth) >> bfqd->full_depth_shift; } - /* - * Does queue (or any parent entity) exceed number of requests that - * should be available to it? Heavily limit depth so that it cannot - * consume more available requests and thus starve other entities. - */ - if (bfqq && bfqq_request_over_limit(bfqq, limit)) - depth = 1; + for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) { + /* Fast path to check if bfqq is already allocated. */ + if (!bic_to_bfqq(bic, op_is_sync(opf), act_idx)) + continue; + /* + * Does queue (or any parent entity) exceed number of + * requests that should be available to it? Heavily + * limit depth so that it cannot consume more + * available requests and thus starve other entities. + */ + if (bfqq_request_over_limit(bfqd, bic, opf, act_idx, limit)) { + depth = 1; + break; + } + } bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u", __func__, bfqd->wr_busy_queues, op_is_sync(opf), depth); if (depth) @@ -1820,6 +1848,18 @@ static bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq, return bfqq_weight > in_serv_weight; } +/* + * Get the index of the actuator that will serve bio. + */ +static unsigned int bfq_actuator_index(struct bfq_data *bfqd, struct bio *bio) +{ + /* + * Multi-actuator support not complete yet, so always return 0 + * for the moment (to keep incomplete mechanisms off). + */ + return 0; +} + static bool bfq_better_to_idle(struct bfq_queue *bfqq); static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, @@ -2150,7 +2190,7 @@ static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq, * We reset waker detection logic also if too much time has passed * since the first detection. If wakeups are rare, pointless idling * doesn't hurt throughput that much. The condition below makes sure - * we do not uselessly idle blocking waker in more than 1/64 cases. + * we do not uselessly idle blocking waker in more than 1/64 cases. 
*/ if (bfqd->last_completed_rq_bfqq != bfqq->tentative_waker_bfqq || @@ -2486,7 +2526,8 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio, */ bfq_bic_update_cgroup(bic, bio); - bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); + bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf), + bfq_actuator_index(bfqd, bio)); } else { bfqd->bio_bfqq = NULL; } @@ -3188,7 +3229,7 @@ static struct bfq_queue *bfq_merge_bfqqs(struct bfq_data *bfqd, /* * Merge queues (that is, let bic redirect its requests to new_bfqq) */ - bic_set_bfqq(bic, new_bfqq, true); + bic_set_bfqq(bic, new_bfqq, true, bfqq->actuator_idx); bfq_mark_bfqq_coop(new_bfqq); /* * new_bfqq now belongs to at least two bics (it is a shared queue): @@ -4818,11 +4859,8 @@ check_queue: */ if (bfq_bfqq_wait_request(bfqq) || (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) { - struct bfq_queue *async_bfqq = - bfqq->bic && bfqq->bic->bfqq[0] && - bfq_bfqq_busy(bfqq->bic->bfqq[0]) && - bfqq->bic->bfqq[0]->next_rq ? - bfqq->bic->bfqq[0] : NULL; + unsigned int act_idx = bfqq->actuator_idx; + struct bfq_queue *async_bfqq = NULL; struct bfq_queue *blocked_bfqq = !hlist_empty(&bfqq->woken_list) ? container_of(bfqq->woken_list.first, @@ -4830,6 +4868,10 @@ check_queue: woken_list_node) : NULL; + if (bfqq->bic && bfqq->bic->bfqq[0][act_idx] && + bfq_bfqq_busy(bfqq->bic->bfqq[0][act_idx]) && + bfqq->bic->bfqq[0][act_idx]->next_rq) + async_bfqq = bfqq->bic->bfqq[0][act_idx]; /* * The next four mutually-exclusive ifs decide * whether to try injection, and choose the queue to @@ -4914,7 +4956,7 @@ check_queue: icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic && bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <= bfq_bfqq_budget_left(async_bfqq)) - bfqq = bfqq->bic->bfqq[0]; + bfqq = bfqq->bic->bfqq[0][act_idx]; else if (bfqq->waker_bfqq && bfq_bfqq_busy(bfqq->waker_bfqq) && bfqq->waker_bfqq->next_rq && @@ -5375,48 +5417,54 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_release_process_ref(bfqd, bfqq); } -static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync) +static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync, + unsigned int actuator_idx) { - struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); + struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, actuator_idx); struct bfq_data *bfqd; if (bfqq) bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ if (bfqq && bfqd) { - unsigned long flags; - - spin_lock_irqsave(&bfqd->lock, flags); - bic_set_bfqq(bic, NULL, is_sync); + bic_set_bfqq(bic, NULL, is_sync, actuator_idx); bfq_exit_bfqq(bfqd, bfqq); - spin_unlock_irqrestore(&bfqd->lock, flags); } } static void bfq_exit_icq(struct io_cq *icq) { struct bfq_io_cq *bic = icq_to_bic(icq); + struct bfq_data *bfqd = bic_to_bfqd(bic); + unsigned long flags; + unsigned int act_idx; + /* + * If bfqd and thus bfqd->num_actuators is not available any + * longer, then cycle over all possible per-actuator bfqqs in + * next loop. We rely on bic being zeroed on creation, and + * therefore on its unused per-actuator fields being NULL. + */ + unsigned int num_actuators = BFQ_MAX_ACTUATORS; - if (bic->stable_merge_bfqq) { - struct bfq_data *bfqd = bic->stable_merge_bfqq->bfqd; - - /* - * bfqd is NULL if scheduler already exited, and in - * that case this is the last time bfqq is accessed. 
- */ - if (bfqd) { - unsigned long flags; - - spin_lock_irqsave(&bfqd->lock, flags); - bfq_put_stable_ref(bic->stable_merge_bfqq); - spin_unlock_irqrestore(&bfqd->lock, flags); - } else { - bfq_put_stable_ref(bic->stable_merge_bfqq); - } + /* + * bfqd is NULL if scheduler already exited, and in that case + * this is the last time these queues are accessed. + */ + if (bfqd) { + spin_lock_irqsave(&bfqd->lock, flags); + num_actuators = bfqd->num_actuators; } - bfq_exit_icq_bfqq(bic, true); - bfq_exit_icq_bfqq(bic, false); + if (bic->stable_merge_bfqq) + bfq_put_stable_ref(bic->stable_merge_bfqq); + + for (act_idx = 0; act_idx < num_actuators; act_idx++) { + bfq_exit_icq_bfqq(bic, true, act_idx); + bfq_exit_icq_bfqq(bic, false, act_idx); + } + + if (bfqd) + spin_unlock_irqrestore(&bfqd->lock, flags); } /* @@ -5493,25 +5541,27 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) bic->ioprio = ioprio; - bfqq = bic_to_bfqq(bic, false); + bfqq = bic_to_bfqq(bic, false, bfq_actuator_index(bfqd, bio)); if (bfqq) { struct bfq_queue *old_bfqq = bfqq; bfqq = bfq_get_queue(bfqd, bio, false, bic, true); - bic_set_bfqq(bic, bfqq, false); + bic_set_bfqq(bic, bfqq, false, bfq_actuator_index(bfqd, bio)); bfq_release_process_ref(bfqd, old_bfqq); } - bfqq = bic_to_bfqq(bic, true); + bfqq = bic_to_bfqq(bic, true, bfq_actuator_index(bfqd, bio)); if (bfqq) bfq_set_next_ioprio_data(bfqq, bic); } static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, - struct bfq_io_cq *bic, pid_t pid, int is_sync) + struct bfq_io_cq *bic, pid_t pid, int is_sync, + unsigned int act_idx) { u64 now_ns = ktime_get_ns(); + bfqq->actuator_idx = act_idx; RB_CLEAR_NODE(&bfqq->entity.rb_node); INIT_LIST_HEAD(&bfqq->fifo); INIT_HLIST_NODE(&bfqq->burst_list_node); @@ -5762,7 +5812,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, if (bfqq) { bfq_init_bfqq(bfqd, bfqq, bic, current->pid, - is_sync); + is_sync, bfq_actuator_index(bfqd, bio)); bfq_init_entity(&bfqq->entity, bfqg); bfq_log_bfqq(bfqd, bfqq, "allocated"); } else { @@ -6078,7 +6128,8 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) * then complete the merge and redirect it to * new_bfqq. 
*/ - if (bic_to_bfqq(RQ_BIC(rq), 1) == bfqq) { + if (bic_to_bfqq(RQ_BIC(rq), true, + bfq_actuator_index(bfqd, rq->bio)) == bfqq) { while (bfqq != new_bfqq) bfqq = bfq_merge_bfqqs(bfqd, RQ_BIC(rq), bfqq); } @@ -6632,7 +6683,7 @@ bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) return bfqq; } - bic_set_bfqq(bic, NULL, true); + bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx); bfq_put_cooperator(bfqq); @@ -6646,7 +6697,8 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, bool split, bool is_sync, bool *new_queue) { - struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync); + unsigned int act_idx = bfq_actuator_index(bfqd, bio); + struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, act_idx); if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) return bfqq; @@ -6658,7 +6710,7 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, bfq_put_queue(bfqq); bfqq = bfq_get_queue(bfqd, bio, is_sync, bic, split); - bic_set_bfqq(bic, bfqq, is_sync); + bic_set_bfqq(bic, bfqq, is_sync, act_idx); if (split && is_sync) { if ((bic->was_in_burst_list && bfqd->large_burst) || bic->saved_in_large_burst) @@ -6733,16 +6785,24 @@ static struct bfq_queue *bfq_waker_bfqq(struct bfq_queue *bfqq) if (new_bfqq == waker_bfqq) { /* * If waker_bfqq is in the merge chain, and current - * is the only procress. + * is the only process, waker_bfqq can be freed. */ if (bfqq_process_refs(waker_bfqq) == 1) return NULL; - break; + + return waker_bfqq; } new_bfqq = new_bfqq->new_bfqq; } + /* + * If waker_bfqq is not in the merge chain, and it's procress reference + * is 0, waker_bfqq can be freed. + */ + if (bfqq_process_refs(waker_bfqq) == 0) + return NULL; + return waker_bfqq; } @@ -7131,8 +7191,10 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues. * Grab a permanent reference to it, so that the normal code flow * will not attempt to free it. + * Set zero as actuator index: we will pretend that + * all I/O requests are for the same actuator. */ - bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); + bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0, 0); bfqd->oom_bfqq.ref++; bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO; bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE; @@ -7151,6 +7213,13 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) bfqd->queue = q; + /* + * Multi-actuator support not complete yet, unconditionally + * set to only one actuator for the moment (to keep incomplete + * mechanisms off). + */ + bfqd->num_actuators = 1; + INIT_LIST_HEAD(&bfqd->dispatch); hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h index 71f721670ab6..2b413ddffbb9 100644 --- a/block/bfq-iosched.h +++ b/block/bfq-iosched.h @@ -33,6 +33,14 @@ */ #define BFQ_SOFTRT_WEIGHT_FACTOR 100 +/* + * Maximum number of actuators supported. This constant is used simply + * to define the size of the static array that will contain + * per-actuator data. The current value is hopefully a good upper + * bound to the possible number of actuators of any actual drive. + */ +#define BFQ_MAX_ACTUATORS 8 + struct bfq_entity; /** @@ -225,12 +233,14 @@ struct bfq_ttime { * struct bfq_queue - leaf schedulable entity. * * A bfq_queue is a leaf request queue; it can be associated with an - * io_context or more, if it is async or shared between cooperating - * processes. 
@cgroup holds a reference to the cgroup, to be sure that it - * does not disappear while a bfqq still references it (mostly to avoid - * races between request issuing and task migration followed by cgroup - * destruction). - * All the fields are protected by the queue lock of the containing bfqd. + * io_context or more, if it is async or shared between cooperating + * processes. Besides, it contains I/O requests for only one actuator + * (an io_context is associated with a different bfq_queue for each + * actuator it generates I/O for). @cgroup holds a reference to the + * cgroup, to be sure that it does not disappear while a bfqq still + * references it (mostly to avoid races between request issuing and + * task migration followed by cgroup destruction). All the fields are + * protected by the queue lock of the containing bfqd. */ struct bfq_queue { /* reference counter */ @@ -395,6 +405,9 @@ struct bfq_queue { * the woken queues when this queue exits. */ struct hlist_head woken_list; + + /* index of the actuator this queue is associated with */ + unsigned int actuator_idx; }; /** @@ -403,8 +416,17 @@ struct bfq_queue { struct bfq_io_cq { /* associated io_cq structure */ struct io_cq icq; /* must be the first member */ - /* array of two process queues, the sync and the async */ - struct bfq_queue *bfqq[2]; + /* + * Matrix of associated process queues: first row for async + * queues, second row sync queues. Each row contains one + * column for each actuator. An I/O request generated by the + * process is inserted into the queue pointed by bfqq[i][j] if + * the request is to be served by the j-th actuator of the + * drive, where i==0 or i==1, depending on whether the request + * is async or sync. So there is a distinct queue for each + * actuator. + */ + struct bfq_queue *bfqq[2][BFQ_MAX_ACTUATORS]; /* per (request_queue, blkcg) ioprio */ int ioprio; #ifdef CONFIG_BFQ_GROUP_IOSCHED @@ -768,6 +790,13 @@ struct bfq_data { */ unsigned int word_depths[2][2]; unsigned int full_depth_shift; + + /* + * Number of independent actuators. This is equal to 1 in + * case of single-actuator drives. 
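With the bfqq[2][BFQ_MAX_ACTUATORS] matrix declared above, the bic_to_bfqq()/bic_set_bfqq() changes earlier in this patch reduce to two-dimensional indexing: sync vs. async selects the row, the actuator index the column. A trimmed-down sketch of that layout (stand-in types, not the bfq structures):

#include <stdbool.h>
#include <stddef.h>

#define MAX_ACTUATORS 8                 /* mirrors BFQ_MAX_ACTUATORS */

struct queue;                           /* stand-in for struct bfq_queue */

struct io_ctx {                         /* stand-in for struct bfq_io_cq */
        /* row 0: async queues, row 1: sync queues; one column per actuator */
        struct queue *bfqq[2][MAX_ACTUATORS];
};

static struct queue *lookup(struct io_ctx *ic, bool is_sync,
                            unsigned int act_idx)
{
        return ic->bfqq[is_sync ? 1 : 0][act_idx];
}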
+ */ + unsigned int num_actuators; + }; enum bfqq_state_flags { @@ -964,8 +993,10 @@ struct bfq_group { extern const int bfq_timeout; -struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync); -void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync); +struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync, + unsigned int actuator_idx); +void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync, + unsigned int actuator_idx); struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic); void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq); void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq, diff --git a/block/bio.c b/block/bio.c index 3318e0022fdf..7e2ce61bf247 100644 --- a/block/bio.c +++ b/block/bio.c @@ -73,7 +73,7 @@ struct bio_slab { struct kmem_cache *slab; unsigned int slab_ref; unsigned int slab_size; - char name[8]; + char name[12]; }; static DEFINE_MUTEX(bio_slab_lock); static DEFINE_XARRAY(bio_slabs); @@ -576,7 +576,7 @@ struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask) { struct bio *bio; - if (nr_vecs > UIO_MAXIOV) + if (nr_vecs > BIO_MAX_INLINE_VECS) return NULL; return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask); } diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 1b7fd1fc2f33..ef596fc10465 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -255,6 +255,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk, blkg->pd[i] = pd; pd->blkg = blkg; pd->plid = i; + pd->online = false; } return blkg; @@ -326,8 +327,11 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk, for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; - if (blkg->pd[i] && pol->pd_online_fn) - pol->pd_online_fn(blkg->pd[i]); + if (blkg->pd[i]) { + if (pol->pd_online_fn) + pol->pd_online_fn(blkg->pd[i]); + blkg->pd[i]->online = true; + } } } blkg->online = true; @@ -432,8 +436,11 @@ static void blkg_destroy(struct blkcg_gq *blkg) for (i = 0; i < BLKCG_MAX_POLS; i++) { struct blkcg_policy *pol = blkcg_policy[i]; - if (blkg->pd[i] && pol->pd_offline_fn) - pol->pd_offline_fn(blkg->pd[i]); + if (blkg->pd[i] && blkg->pd[i]->online) { + if (pol->pd_offline_fn) + pol->pd_offline_fn(blkg->pd[i]); + blkg->pd[i]->online = false; + } } blkg->online = false; @@ -924,6 +931,7 @@ static void blkcg_fill_root_iostats(void) blkg_iostat_set(&blkg->iostat.cur, &tmp); u64_stats_update_end_irqrestore(&blkg->iostat.sync, flags); } + class_dev_iter_exit(&iter); } static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s) @@ -1110,10 +1118,14 @@ void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css) struct blkcg *blkcg = css_to_blkcg(blkcg_css); do { + struct blkcg *parent; + if (!refcount_dec_and_test(&blkcg->online_pin)) break; + + parent = blkcg_parent(blkcg); blkcg_destroy_blkgs(blkcg); - blkcg = blkcg_parent(blkcg); + blkcg = parent; } while (blkcg); } @@ -1417,6 +1429,7 @@ retry: blkg->pd[pol->plid] = pd; pd->blkg = blkg; pd->plid = pol->plid; + pd->online = false; } /* all allocated, init in the same order */ @@ -1424,9 +1437,11 @@ retry: list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) pol->pd_init_fn(blkg->pd[pol->plid]); - if (pol->pd_online_fn) - list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) + list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) { + if (pol->pd_online_fn) pol->pd_online_fn(blkg->pd[pol->plid]); + blkg->pd[pol->plid]->online = true; + } 
__set_bit(pol->plid, q->blkcg_pols); ret = 0; @@ -1488,7 +1503,7 @@ void blkcg_deactivate_policy(struct request_queue *q, spin_lock(&blkcg->lock); if (blkg->pd[pol->plid]) { - if (pol->pd_offline_fn) + if (blkg->pd[pol->plid]->online && pol->pd_offline_fn) pol->pd_offline_fn(blkg->pd[pol->plid]); pol->pd_free_fn(blkg->pd[pol->plid]); blkg->pd[pol->plid] = NULL; diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h index aa2b286bc825..59815b269a20 100644 --- a/block/blk-cgroup.h +++ b/block/blk-cgroup.h @@ -125,6 +125,7 @@ struct blkg_policy_data { /* the blkg and policy id this per-policy data belongs to */ struct blkcg_gq *blkg; int plid; + bool online; }; /* diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 772e909e9fbf..20993d79d5cc 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -1082,7 +1082,14 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse, inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum, iocg->child_active_sum); } else { - inuse = clamp_t(u32, inuse, 1, active); + /* + * It may be tempting to turn this into a clamp expression with + * a lower limit of 1 but active may be 0, which cannot be used + * as an upper limit in that situation. This expression allows + * active to clamp inuse unless it is 0, in which case inuse + * becomes 1. + */ + inuse = min(inuse, active) ?: 1; } iocg->last_inuse = iocg->inuse; @@ -1423,8 +1430,11 @@ static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay, lockdep_assert_held(&iocg->ioc->lock); lockdep_assert_held(&iocg->waitq.lock); - /* make sure that nobody messed with @iocg */ - WARN_ON_ONCE(list_empty(&iocg->active_list)); + /* + * make sure that nobody messed with @iocg. Check iocg->pd.online + * to avoid warn when removing blkcg or disk. + */ + WARN_ON_ONCE(list_empty(&iocg->active_list) && iocg->pd.online); WARN_ON_ONCE(iocg->inuse > 1); iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt); diff --git a/block/blk-merge.c b/block/blk-merge.c index 13a47b37acb7..b90c413c83c4 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -247,6 +247,14 @@ static bool bvec_split_segs(struct queue_limits *lim, const struct bio_vec *bv, return len > 0 || bv->bv_len > max_len; } +static unsigned int bio_split_alignment(struct bio *bio, + const struct queue_limits *lim) +{ + if (op_is_write(bio_op(bio)) && lim->zone_write_granularity) + return lim->zone_write_granularity; + return lim->logical_block_size; +} + /** * bio_split_rw - split a bio in two bios * @bio: [in] bio to be split @@ -317,7 +325,7 @@ split: * split size so that each bio is properly block size aligned, even if * we do not use the full hardware limits. */ - bytes = ALIGN_DOWN(bytes, lim->logical_block_size); + bytes = ALIGN_DOWN(bytes, bio_split_alignment(bio, lim)); /* * Bio splitting may cause subtle trouble such as hang when doing sync diff --git a/block/blk-mq.c b/block/blk-mq.c index 542b28a2e6b0..373a67a630f3 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2475,6 +2475,12 @@ void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) return; clear_bit(BLK_MQ_S_STOPPED, &hctx->state); + /* + * Pairs with the smp_mb() in blk_mq_hctx_stopped() to order the + * clearing of BLK_MQ_S_STOPPED above and the checking of dispatch + * list in the subsequent routine. 
+ */ + smp_mb__after_atomic(); blk_mq_run_hw_queue(hctx, async); } EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue); @@ -3665,16 +3671,11 @@ static int blk_mq_init_hctx(struct request_queue *q, { hctx->queue_num = hctx_idx; - if (!(hctx->flags & BLK_MQ_F_STACKING)) - cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, - &hctx->cpuhp_online); - cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); - hctx->tags = set->tags[hctx_idx]; if (set->ops->init_hctx && set->ops->init_hctx(hctx, set->driver_data, hctx_idx)) - goto unregister_cpu_notifier; + goto fail; if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx, hctx->numa_node)) @@ -3683,6 +3684,11 @@ static int blk_mq_init_hctx(struct request_queue *q, if (xa_insert(&q->hctx_table, hctx_idx, hctx, GFP_KERNEL)) goto exit_flush_rq; + if (!(hctx->flags & BLK_MQ_F_STACKING)) + cpuhp_state_add_instance_nocalls(CPUHP_AP_BLK_MQ_ONLINE, + &hctx->cpuhp_online); + cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead); + return 0; exit_flush_rq: @@ -3691,8 +3697,7 @@ static int blk_mq_init_hctx(struct request_queue *q, exit_hctx: if (set->ops->exit_hctx) set->ops->exit_hctx(hctx, hctx_idx); - unregister_cpu_notifier: - blk_mq_remove_cpuhp(hctx); + fail: return -1; } diff --git a/block/blk-mq.h b/block/blk-mq.h index c6eca452ea2a..c79deb684a95 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -178,6 +178,19 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx) { + /* Fast path: hardware queue is not stopped most of the time. */ + if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state))) + return false; + + /* + * This barrier is used to order adding of dispatch list before and + * the test of BLK_MQ_S_STOPPED below. Pairs with the memory barrier + * in blk_mq_start_stopped_hw_queue() so that dispatch code could + * either see BLK_MQ_S_STOPPED is cleared or dispatch list is not + * empty to avoid missing dispatching requests. + */ + smp_mb(); + return test_bit(BLK_MQ_S_STOPPED, &hctx->state); } diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index a82bdec923b2..c74e8273511a 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -858,10 +858,8 @@ unlock: * faster to shut down and is made fully functional here as * request_queues for non-existent devices never get registered. */ - if (!blk_queue_init_done(q)) { - blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); - percpu_ref_switch_to_percpu(&q->q_usage_counter); - } + blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q); + percpu_ref_switch_to_percpu(&q->q_usage_counter); return ret; diff --git a/block/fops.c b/block/fops.c index 01cb6260fa24..b02fe200c3ec 100644 --- a/block/fops.c +++ b/block/fops.c @@ -601,11 +601,12 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) file_accessed(iocb->ki_filp); ret = blkdev_direct_IO(iocb, to); - if (ret >= 0) { + if (ret > 0) { iocb->ki_pos += ret; count -= ret; } - iov_iter_revert(to, count - iov_iter_count(to)); + if (ret != -EIOCBQUEUED) + iov_iter_revert(to, count - iov_iter_count(to)); if (ret < 0 || !count) goto reexpand; } diff --git a/block/genhd.c b/block/genhd.c index 146ce13b192b..1cb517969607 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -685,13 +685,10 @@ void del_gendisk(struct gendisk *disk) * If the disk does not own the queue, allow using passthrough requests * again. Else leave the queue frozen to fail all I/O. 
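The two comments above describe a Dekker-style pairing: one side clears BLK_MQ_S_STOPPED and then looks at the dispatch list, the other adds to the dispatch list and then re-checks the flag. A userspace model of that pairing (the names are made up and C11 seq_cst fences merely stand in for smp_mb__after_atomic()/smp_mb(); this is not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool stopped = true;
static atomic_int  dispatch_len;

/* models blk_mq_start_stopped_hw_queue(): clear STOPPED, then run the
 * queue, i.e. look at the dispatch list. */
static int start_stopped_hw_queue(void)
{
	atomic_store_explicit(&stopped, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic() */
	return atomic_load_explicit(&dispatch_len, memory_order_relaxed);
}

/* models the submit side: add to the dispatch list, then check
 * blk_mq_hctx_stopped() with its fast path and full barrier. */
static bool submit_sees_stopped(void)
{
	atomic_fetch_add_explicit(&dispatch_len, 1, memory_order_relaxed);

	if (!atomic_load_explicit(&stopped, memory_order_relaxed))
		return false;				/* fast path */

	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	return atomic_load_explicit(&stopped, memory_order_relaxed);
}

With both fences in place at least one side observes the other's update, so a request added while the queue is being restarted is never left on the dispatch list with nobody around to run it.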
*/ - if (!test_bit(GD_OWNS_QUEUE, &disk->state)) { - blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q); + if (!test_bit(GD_OWNS_QUEUE, &disk->state)) __blk_mq_unfreeze_queue(q, true); - } else { - if (queue_is_mq(q)) - blk_mq_exit_queue(q); - } + else if (queue_is_mq(q)) + blk_mq_exit_queue(q); } EXPORT_SYMBOL(del_gendisk); @@ -741,7 +738,7 @@ static ssize_t disk_badblocks_store(struct device *dev, } #ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD -void blk_request_module(dev_t devt) +static bool blk_probe_dev(dev_t devt) { unsigned int major = MAJOR(devt); struct blk_major_name **n; @@ -751,14 +748,26 @@ void blk_request_module(dev_t devt) if ((*n)->major == major && (*n)->probe) { (*n)->probe(devt); mutex_unlock(&major_names_lock); - return; + return true; } } mutex_unlock(&major_names_lock); + return false; +} - if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0) - /* Make old-style 2.4 aliases work */ - request_module("block-major-%d", MAJOR(devt)); +void blk_request_module(dev_t devt) +{ + int error; + + if (blk_probe_dev(devt)) + return; + + error = request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)); + /* Make old-style 2.4 aliases work */ + if (error > 0) + error = request_module("block-major-%d", MAJOR(devt)); + if (!error) + blk_probe_dev(devt); } #endif /* CONFIG_BLOCK_LEGACY_AUTOLOAD */ diff --git a/block/ioctl.c b/block/ioctl.c index c7390d8c9fc7..552da0ccbec0 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -115,7 +115,7 @@ static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode, return -EINVAL; filemap_invalidate_lock(inode->i_mapping); - err = truncate_bdev_range(bdev, mode, start, start + len - 1); + err = truncate_bdev_range(bdev, mode, start, end - 1); if (err) goto fail; err = blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL); @@ -127,7 +127,7 @@ fail: static int blk_ioctl_secure_erase(struct block_device *bdev, fmode_t mode, void __user *argp) { - uint64_t start, len; + uint64_t start, len, end; uint64_t range[2]; int err; @@ -142,11 +142,12 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, fmode_t mode, len = range[1]; if ((start & 511) || (len & 511)) return -EINVAL; - if (start + len > bdev_nr_bytes(bdev)) + if (check_add_overflow(start, len, &end) || + end > bdev_nr_bytes(bdev)) return -EINVAL; filemap_invalidate_lock(bdev->bd_inode->i_mapping); - err = truncate_bdev_range(bdev, mode, start, start + len - 1); + err = truncate_bdev_range(bdev, mode, start, end - 1); if (!err) err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9, GFP_KERNEL); diff --git a/block/partitions/efi.c b/block/partitions/efi.c index 5e9be13a56a8..7acba66eed48 100644 --- a/block/partitions/efi.c +++ b/block/partitions/efi.c @@ -682,7 +682,7 @@ static void utf16_le_to_7bit(const __le16 *in, unsigned int size, u8 *out) out[size] = 0; while (i < size) { - u8 c = le16_to_cpu(in[i]) & 0xff; + u8 c = le16_to_cpu(in[i]) & 0x7f; if (c && !isprint(c)) c = '!'; diff --git a/block/partitions/ldm.h b/block/partitions/ldm.h index 0a747a0c782d..f98dbee94149 100644 --- a/block/partitions/ldm.h +++ b/block/partitions/ldm.h @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -/** +/* * ldm - Part of the Linux-NTFS project. 
* * Copyright (C) 2001,2002 Richard Russon diff --git a/block/partitions/mac.c b/block/partitions/mac.c index 7b521df00a39..6415213cd184 100644 --- a/block/partitions/mac.c +++ b/block/partitions/mac.c @@ -51,13 +51,25 @@ int mac_partition(struct parsed_partitions *state) } secsize = be16_to_cpu(md->block_size); put_dev_sector(sect); + + /* + * If the "block size" is not a power of 2, things get weird - we might + * end up with a partition straddling a sector boundary, so we wouldn't + * be able to read a partition entry with read_part_sector(). + * Real block sizes are probably (?) powers of two, so just require + * that. + */ + if (!is_power_of_2(secsize)) + return -1; datasize = round_down(secsize, 512); data = read_part_sector(state, datasize / 512, &sect); if (!data) return -1; partoffset = secsize % 512; - if (partoffset + sizeof(*part) > datasize) + if (partoffset + sizeof(*part) > datasize) { + put_dev_sector(sect); return -1; + } part = (struct mac_partition *) (data + partoffset); if (be16_to_cpu(part->signature) != MAC_PARTITION_MAGIC) { put_dev_sector(sect); @@ -110,8 +122,8 @@ int mac_partition(struct parsed_partitions *state) int i, l; goodness++; - l = strlen(part->name); - if (strcmp(part->name, "/") == 0) + l = strnlen(part->name, sizeof(part->name)); + if (strncmp(part->name, "/", sizeof(part->name)) == 0) goodness++; for (i = 0; i <= l - 4; ++i) { if (strncasecmp(part->name + i, "root", diff --git a/certs/Makefile b/certs/Makefile index 799ad7b9e68a..67e1f2707c2f 100644 --- a/certs/Makefile +++ b/certs/Makefile @@ -84,5 +84,5 @@ targets += x509_revocation_list hostprogs := extract-cert -HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) +HOSTCFLAGS_extract-cert.o = $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) -I$(srctree)/scripts HOSTLDLIBS_extract-cert = $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto) diff --git a/certs/extract-cert.c b/certs/extract-cert.c index 8c1fb9a70d66..0a89cdc3e185 100644 --- a/certs/extract-cert.c +++ b/certs/extract-cert.c @@ -21,14 +21,17 @@ #include #include #include -#include - -/* - * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API. - * - * Remove this if/when that API is no longer used - */ -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#if OPENSSL_VERSION_MAJOR >= 3 +# define USE_PKCS11_PROVIDER +# include +# include +#else +# if !defined(OPENSSL_NO_ENGINE) && !defined(OPENSSL_NO_DEPRECATED_3_0) +# define USE_PKCS11_ENGINE +# include +# endif +#endif +#include "ssl-common.h" #define PKEY_ID_PKCS7 2 @@ -40,41 +43,6 @@ void format(void) exit(2); } -static void display_openssl_errors(int l) -{ - const char *file; - char buf[120]; - int e, line; - - if (ERR_peek_error() == 0) - return; - fprintf(stderr, "At main.c:%d:\n", l); - - while ((e = ERR_get_error_line(&file, &line))) { - ERR_error_string(e, buf); - fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line); - } -} - -static void drain_openssl_errors(void) -{ - const char *file; - int line; - - if (ERR_peek_error() == 0) - return; - while (ERR_get_error_line(&file, &line)) {} -} - -#define ERR(cond, fmt, ...)
\ - do { \ - bool __cond = (cond); \ - display_openssl_errors(__LINE__); \ - if (__cond) { \ - err(1, fmt, ## __VA_ARGS__); \ - } \ - } while(0) - static const char *key_pass; static BIO *wb; static char *cert_dst; @@ -94,6 +62,66 @@ static void write_cert(X509 *x509) fprintf(stderr, "Extracted cert: %s\n", buf); } +static X509 *load_cert_pkcs11(const char *cert_src) +{ + X509 *cert = NULL; +#ifdef USE_PKCS11_PROVIDER + OSSL_STORE_CTX *store; + + if (!OSSL_PROVIDER_try_load(NULL, "pkcs11", true)) + ERR(1, "OSSL_PROVIDER_try_load(pkcs11)"); + if (!OSSL_PROVIDER_try_load(NULL, "default", true)) + ERR(1, "OSSL_PROVIDER_try_load(default)"); + + store = OSSL_STORE_open(cert_src, NULL, NULL, NULL, NULL); + ERR(!store, "OSSL_STORE_open"); + + while (!OSSL_STORE_eof(store)) { + OSSL_STORE_INFO *info = OSSL_STORE_load(store); + + if (!info) { + drain_openssl_errors(__LINE__, 0); + continue; + } + if (OSSL_STORE_INFO_get_type(info) == OSSL_STORE_INFO_CERT) { + cert = OSSL_STORE_INFO_get1_CERT(info); + ERR(!cert, "OSSL_STORE_INFO_get1_CERT"); + } + OSSL_STORE_INFO_free(info); + if (cert) + break; + } + OSSL_STORE_close(store); +#elif defined(USE_PKCS11_ENGINE) + ENGINE *e; + struct { + const char *cert_id; + X509 *cert; + } parms; + + parms.cert_id = cert_src; + parms.cert = NULL; + + ENGINE_load_builtin_engines(); + drain_openssl_errors(__LINE__, 1); + e = ENGINE_by_id("pkcs11"); + ERR(!e, "Load PKCS#11 ENGINE"); + if (ENGINE_init(e)) + drain_openssl_errors(__LINE__, 1); + else + ERR(1, "ENGINE_init"); + if (key_pass) + ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN"); + ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1); + ERR(!parms.cert, "Get X.509 from PKCS#11"); + cert = parms.cert; +#else + fprintf(stderr, "no pkcs11 engine/provider available\n"); + exit(1); +#endif + return cert; +} + int main(int argc, char **argv) { char *cert_src; @@ -119,28 +147,10 @@ int main(int argc, char **argv) fclose(f); exit(0); } else if (!strncmp(cert_src, "pkcs11:", 7)) { - ENGINE *e; - struct { - const char *cert_id; - X509 *cert; - } parms; + X509 *cert = load_cert_pkcs11(cert_src); - parms.cert_id = cert_src; - parms.cert = NULL; - - ENGINE_load_builtin_engines(); - drain_openssl_errors(); - e = ENGINE_by_id("pkcs11"); - ERR(!e, "Load PKCS#11 ENGINE"); - if (ENGINE_init(e)) - drain_openssl_errors(); - else - ERR(1, "ENGINE_init"); - if (key_pass) - ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN"); - ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1); - ERR(!parms.cert, "Get X.509 from PKCS#11"); - write_cert(parms.cert); + ERR(!cert, "load_cert_pkcs11 failed"); + write_cert(cert); } else { BIO *b; X509 *x509; diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 1d017ec5c63c..84f4e9c2b5d9 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -263,10 +263,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags, return err; err = crypto_ahash_import(&ctx2->req, state); - if (err) { - sock_orphan(sk2); - sock_put(sk2); - } return err; } diff --git a/crypto/api.c b/crypto/api.c index 64f2d365a8e9..4308b1c8ca2e 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -409,6 +409,7 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, goto out_err; tfm->__crt_alg = alg; + refcount_set(&tfm->refcnt, 1); err = crypto_init_ops(tfm, type, mask); if (err) @@ -488,26 +489,43 @@ err: } EXPORT_SYMBOL_GPL(crypto_alloc_base); -void *crypto_create_tfm_node(struct crypto_alg *alg, - const struct crypto_type *frontend, 
- int node) +static void *crypto_alloc_tfmmem(struct crypto_alg *alg, + const struct crypto_type *frontend, int node, + gfp_t gfp) { - char *mem; - struct crypto_tfm *tfm = NULL; + struct crypto_tfm *tfm; unsigned int tfmsize; unsigned int total; - int err = -ENOMEM; + char *mem; tfmsize = frontend->tfmsize; total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); - mem = kzalloc_node(total, GFP_KERNEL, node); + mem = kzalloc_node(total, gfp, node); if (mem == NULL) - goto out_err; + return ERR_PTR(-ENOMEM); tfm = (struct crypto_tfm *)(mem + tfmsize); tfm->__crt_alg = alg; tfm->node = node; + refcount_set(&tfm->refcnt, 1); + + return mem; +} + +void *crypto_create_tfm_node(struct crypto_alg *alg, + const struct crypto_type *frontend, + int node) +{ + struct crypto_tfm *tfm; + char *mem; + int err; + + mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL); + if (IS_ERR(mem)) + goto out; + + tfm = (struct crypto_tfm *)(mem + frontend->tfmsize); err = frontend->init_tfm(tfm); if (err) @@ -524,13 +542,38 @@ out_free_tfm: if (err == -EAGAIN) crypto_shoot_alg(alg); kfree(mem); -out_err: mem = ERR_PTR(err); out: return mem; } EXPORT_SYMBOL_GPL(crypto_create_tfm_node); +void *crypto_clone_tfm(const struct crypto_type *frontend, + struct crypto_tfm *otfm) +{ + struct crypto_alg *alg = otfm->__crt_alg; + struct crypto_tfm *tfm; + char *mem; + + mem = ERR_PTR(-ESTALE); + if (unlikely(!crypto_mod_get(alg))) + goto out; + + mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC); + if (IS_ERR(mem)) { + crypto_mod_put(alg); + goto out; + } + + tfm = (struct crypto_tfm *)(mem + frontend->tfmsize); + tfm->crt_flags = otfm->crt_flags; + tfm->exit = otfm->exit; + +out: + return mem; +} +EXPORT_SYMBOL_GPL(crypto_clone_tfm); + struct crypto_alg *crypto_find_alg(const char *alg_name, const struct crypto_type *frontend, u32 type, u32 mask) @@ -620,6 +663,8 @@ void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm) if (IS_ERR_OR_NULL(mem)) return; + if (!refcount_dec_and_test(&tfm->refcnt)) + return; alg = tfm->__crt_alg; if (!tfm->exit && alg->cra_exit) diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c index 5b84b0f7cc17..337867028653 100644 --- a/crypto/crypto_null.c +++ b/crypto/crypto_null.c @@ -17,10 +17,10 @@ #include #include #include -#include +#include #include -static DEFINE_MUTEX(crypto_default_null_skcipher_lock); +static DEFINE_SPINLOCK(crypto_default_null_skcipher_lock); static struct crypto_sync_skcipher *crypto_default_null_skcipher; static int crypto_default_null_skcipher_refcnt; @@ -152,23 +152,32 @@ MODULE_ALIAS_CRYPTO("cipher_null"); struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void) { + struct crypto_sync_skcipher *ntfm = NULL; struct crypto_sync_skcipher *tfm; - mutex_lock(&crypto_default_null_skcipher_lock); + spin_lock_bh(&crypto_default_null_skcipher_lock); tfm = crypto_default_null_skcipher; if (!tfm) { - tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0); - if (IS_ERR(tfm)) - goto unlock; + spin_unlock_bh(&crypto_default_null_skcipher_lock); - crypto_default_null_skcipher = tfm; + ntfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0); + if (IS_ERR(ntfm)) + return ntfm; + + spin_lock_bh(&crypto_default_null_skcipher_lock); + tfm = crypto_default_null_skcipher; + if (!tfm) { + tfm = ntfm; + ntfm = NULL; + crypto_default_null_skcipher = tfm; + } } crypto_default_null_skcipher_refcnt++; + spin_unlock_bh(&crypto_default_null_skcipher_lock); -unlock: - mutex_unlock(&crypto_default_null_skcipher_lock); + crypto_free_sync_skcipher(ntfm); 
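The crypto_get_default_null_skcipher() rework above is the usual pattern for lazily creating a shared object under a spinlock when the constructor may sleep: drop the lock, allocate, retake the lock, re-check, and free the losing copy outside the lock. A compact userspace model of the pattern (a pthread mutex stands in for the bh-disabling spinlock; every name is illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj { int payload; } *shared;
static unsigned int refcnt;

static struct obj *get_shared(void)
{
	struct obj *extra = NULL, *o;

	pthread_mutex_lock(&lock);
	o = shared;
	if (!o) {
		pthread_mutex_unlock(&lock);

		extra = malloc(sizeof(*extra));	/* allocation may sleep */
		if (!extra)
			return NULL;

		pthread_mutex_lock(&lock);
		o = shared;
		if (!o) {			/* we won the race */
			o = extra;
			extra = NULL;
			shared = o;
		}
	}
	refcnt++;
	pthread_mutex_unlock(&lock);

	free(extra);				/* loser's copy, freed outside the lock */
	return o;
}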
return tfm; } @@ -176,12 +185,16 @@ EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher); void crypto_put_default_null_skcipher(void) { - mutex_lock(&crypto_default_null_skcipher_lock); + struct crypto_sync_skcipher *tfm = NULL; + + spin_lock_bh(&crypto_default_null_skcipher_lock); if (!--crypto_default_null_skcipher_refcnt) { - crypto_free_sync_skcipher(crypto_default_null_skcipher); + tfm = crypto_default_null_skcipher; crypto_default_null_skcipher = NULL; } - mutex_unlock(&crypto_default_null_skcipher_lock); + spin_unlock_bh(&crypto_default_null_skcipher_lock); + + crypto_free_sync_skcipher(tfm); } EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher); diff --git a/crypto/internal.h b/crypto/internal.h index c08385571853..e81cd7594b35 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -85,6 +86,8 @@ struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, u32 mask); void *crypto_create_tfm_node(struct crypto_alg *alg, const struct crypto_type *frontend, int node); +void *crypto_clone_tfm(const struct crypto_type *frontend, + struct crypto_tfm *otfm); static inline void *crypto_create_tfm(struct crypto_alg *alg, const struct crypto_type *frontend) @@ -166,5 +169,10 @@ static inline int crypto_is_test_larval(struct crypto_larval *larval) return larval->alg.cra_driver_name[0]; } +static inline struct crypto_tfm *crypto_tfm_get(struct crypto_tfm *tfm) +{ + return refcount_inc_not_zero(&tfm->refcnt) ? tfm : ERR_PTR(-EOVERFLOW); +} + #endif /* _CRYPTO_INTERNAL_H */ diff --git a/crypto/lzo-rle.c b/crypto/lzo-rle.c index 0631d975bfac..0abc2d87f042 100644 --- a/crypto/lzo-rle.c +++ b/crypto/lzo-rle.c @@ -55,7 +55,7 @@ static int __lzorle_compress(const u8 *src, unsigned int slen, size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; - err = lzorle1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzorle1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); if (err != LZO_E_OK) return -EINVAL; diff --git a/crypto/lzo.c b/crypto/lzo.c index ebda132dd22b..8338851c7406 100644 --- a/crypto/lzo.c +++ b/crypto/lzo.c @@ -55,7 +55,7 @@ static int __lzo_compress(const u8 *src, unsigned int slen, size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */ int err; - err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx); + err = lzo1x_1_compress_safe(src, slen, dst, &tmp_len, ctx); if (err != LZO_E_OK) return -EINVAL; diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c index 005a36cb21bc..2d7f98709e97 100644 --- a/crypto/pcrypt.c +++ b/crypto/pcrypt.c @@ -117,8 +117,10 @@ static int pcrypt_aead_encrypt(struct aead_request *req) err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; - if (err == -EBUSY) - return -EAGAIN; + if (err == -EBUSY) { + /* try non-parallel mode */ + return crypto_aead_encrypt(creq); + } return err; } @@ -166,8 +168,10 @@ static int pcrypt_aead_decrypt(struct aead_request *req) err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu); if (!err) return -EINPROGRESS; - if (err == -EBUSY) - return -EAGAIN; + if (err == -EBUSY) { + /* try non-parallel mode */ + return crypto_aead_decrypt(creq); + } return err; } diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 473241b5193f..596e96d3b3bd 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -438,7 +438,7 @@ config ACPI_SBS the modules will be called sbs and sbshc. 
config ACPI_HED - tristate "Hardware Error Device" + bool "Hardware Error Device" help This driver supports the Hardware Error Device (PNP0C33), which is used to report some hardware errors notified via diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c index 0a8372bf6a77..6fa6b485e30d 100644 --- a/drivers/acpi/acpica/evxfregn.c +++ b/drivers/acpi/acpica/evxfregn.c @@ -201,8 +201,6 @@ acpi_remove_address_space_handler(acpi_handle device, /* Now we can delete the handler object */ - acpi_os_release_mutex(handler_obj->address_space. - context_mutex); acpi_ut_remove_reference(handler_obj); goto unlock_and_exit; } diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index dd808cf65c84..83a4b417b27b 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -155,8 +155,6 @@ static unsigned long ghes_estatus_pool_size_request; static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE]; static atomic_t ghes_estatus_cache_alloced; -static int ghes_panic_timeout __read_mostly = 30; - static void __iomem *ghes_map(u64 pfn, enum fixed_addresses fixmap_idx) { phys_addr_t paddr; @@ -858,14 +856,16 @@ static void __ghes_panic(struct ghes *ghes, struct acpi_hest_generic_status *estatus, u64 buf_paddr, enum fixed_addresses fixmap_idx) { + const char *msg = GHES_PFX "Fatal hardware error"; + __ghes_print_estatus(KERN_EMERG, ghes->generic, estatus); ghes_clear_estatus(ghes, estatus, buf_paddr, fixmap_idx); - /* reboot to log the error! */ if (!panic_timeout) - panic_timeout = ghes_panic_timeout; - panic("Fatal hardware error!"); + pr_emerg("%s but panic disabled\n", msg); + + panic(msg); } static int ghes_proc(struct ghes *ghes) diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c index c0e77c1c8e09..eb6c2d360387 100644 --- a/drivers/acpi/arm64/gtdt.c +++ b/drivers/acpi/arm64/gtdt.c @@ -283,7 +283,7 @@ error: if (frame->virt_irq > 0) acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt); frame->virt_irq = 0; - } while (i-- >= 0 && gtdt_frame--); + } while (i-- > 0 && gtdt_frame--); return -EINVAL; } diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 31ea76b6fa04..0e1fb97d5d76 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -1136,7 +1136,6 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) return -EFAULT; } val = MASK_VAL_WRITE(reg, prev_val, val); - val |= prev_val; } switch (size) { diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 63803091f8b1..577698739090 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -2260,6 +2260,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = { DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"), }, }, + /* + * Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC + * https://gitlab.freedesktop.org/drm/amd/-/issues/3929 + */ + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83L3"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83N6"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"), + } + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"), + } + }, { }, }; diff --git a/drivers/acpi/fan_core.c b/drivers/acpi/fan_core.c index 52a0b303b70a..36907331a669 100644 --- a/drivers/acpi/fan_core.c +++ b/drivers/acpi/fan_core.c @@ -366,19 +366,25 @@ static int 
acpi_fan_probe(struct platform_device *pdev) result = sysfs_create_link(&pdev->dev.kobj, &cdev->device.kobj, "thermal_cooling"); - if (result) + if (result) { dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n"); + goto err_unregister; + } result = sysfs_create_link(&cdev->device.kobj, &pdev->dev.kobj, "device"); if (result) { dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n"); - goto err_end; + goto err_remove_link; } return 0; +err_remove_link: + sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling"); +err_unregister: + thermal_cooling_device_unregister(cdev); err_end: if (fan->acpi4) acpi_fan_delete_attributes(device); diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c index 60a2939cde6c..e8e9b1ac06b8 100644 --- a/drivers/acpi/hed.c +++ b/drivers/acpi/hed.c @@ -72,7 +72,12 @@ static struct acpi_driver acpi_hed_driver = { .notify = acpi_hed_notify, }, }; -module_acpi_driver(acpi_hed_driver); + +static int __init acpi_hed_driver_init(void) +{ + return acpi_bus_register_driver(&acpi_hed_driver); +} +subsys_initcall(acpi_hed_driver_init); MODULE_AUTHOR("Huang Ying"); MODULE_DESCRIPTION("ACPI Hardware Error Device Driver"); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 6d4ac934cd49..129c503b0951 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -454,8 +454,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, if (cmd_rc) *cmd_rc = -EINVAL; - if (cmd == ND_CMD_CALL) + if (cmd == ND_CMD_CALL) { + if (!buf || buf_len < sizeof(*call_pkg)) + return -EINVAL; + call_pkg = buf; + } + func = cmd_to_func(nfit_mem, cmd, call_pkg, &family); if (func < 0) return func; @@ -480,7 +485,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, cmd_mask = nd_desc->cmd_mask; if (cmd == ND_CMD_CALL && call_pkg->nd_family) { family = call_pkg->nd_family; - if (family > NVDIMM_BUS_FAMILY_MAX || + if (call_pkg->nd_family > NVDIMM_BUS_FAMILY_MAX || !test_bit(family, &nd_desc->bus_family_mask)) return -EINVAL; family = array_index_nospec(family, diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c index d418462ab791..89f34310237c 100644 --- a/drivers/acpi/platform_profile.c +++ b/drivers/acpi/platform_profile.c @@ -22,8 +22,8 @@ static const char * const profile_names[] = { }; static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST); -static ssize_t platform_profile_choices_show(struct device *dev, - struct device_attribute *attr, +static ssize_t platform_profile_choices_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { int len = 0; @@ -49,8 +49,8 @@ static ssize_t platform_profile_choices_show(struct device *dev, return len; } -static ssize_t platform_profile_show(struct device *dev, - struct device_attribute *attr, +static ssize_t platform_profile_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) { enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED; @@ -77,8 +77,8 @@ static ssize_t platform_profile_show(struct device *dev, return sysfs_emit(buf, "%s\n", profile_names[profile]); } -static ssize_t platform_profile_store(struct device *dev, - struct device_attribute *attr, +static ssize_t platform_profile_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) { int err, i; @@ -115,12 +115,12 @@ static ssize_t platform_profile_store(struct device *dev, return count; } -static DEVICE_ATTR_RO(platform_profile_choices); -static DEVICE_ATTR_RW(platform_profile); 
+static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices); +static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile); static struct attribute *platform_profile_attrs[] = { - &dev_attr_platform_profile_choices.attr, - &dev_attr_platform_profile.attr, + &attr_platform_profile_choices.attr, + &attr_platform_profile.attr, NULL }; diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index ced3eb15bd8b..1938e4778725 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -217,18 +217,20 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr, node_entry = ACPI_PTR_DIFF(node, table_hdr); entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, sizeof(struct acpi_table_pptt)); - proc_sz = sizeof(struct acpi_pptt_processor *); + proc_sz = sizeof(struct acpi_pptt_processor); - while ((unsigned long)entry + proc_sz < table_end) { + /* ignore subtable types that are smaller than a processor node */ + while ((unsigned long)entry + proc_sz <= table_end) { cpu_node = (struct acpi_pptt_processor *)entry; + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && cpu_node->parent == node_entry) return 0; if (entry->length == 0) return 0; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, entry->length); - } return 1; } @@ -258,18 +260,21 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he table_end = (unsigned long)table_hdr + table_hdr->length; entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, sizeof(struct acpi_table_pptt)); - proc_sz = sizeof(struct acpi_pptt_processor *); + proc_sz = sizeof(struct acpi_pptt_processor); /* find the processor structure associated with this cpuid */ - while ((unsigned long)entry + proc_sz < table_end) { + while ((unsigned long)entry + proc_sz <= table_end) { cpu_node = (struct acpi_pptt_processor *)entry; if (entry->length == 0) { pr_warn("Invalid zero length subtable\n"); break; } + /* entry->length may not equal proc_sz, revalidate the processor structure length */ if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && acpi_cpu_id == cpu_node->acpi_processor_id && + (unsigned long)entry + entry->length <= table_end && + entry->length == proc_sz + cpu_node->number_of_priv_resources * sizeof(u32) && acpi_pptt_leaf_node(table_hdr, cpu_node)) { return (struct acpi_pptt_processor *)entry; } diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c index dd0261fd12d6..7747ca4168ab 100644 --- a/drivers/acpi/prmt.c +++ b/drivers/acpi/prmt.c @@ -263,9 +263,7 @@ static acpi_status acpi_platformrt_space_handler(u32 function, if (!handler || !module) goto invalid_guid; - if (!handler->handler_addr || - !handler->static_data_buffer_addr || - !handler->acpi_param_buffer_addr) { + if (!handler->handler_addr) { buffer->prm_status = PRM_HANDLER_ERROR; return AE_OK; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 18f4334a9691..3386d01c1f15 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -268,6 +268,10 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr) ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x", pr->power.states[ACPI_STATE_C3].address); + if (!pr->power.states[ACPI_STATE_C2].address && + !pr->power.states[ACPI_STATE_C3].address) + return -ENODEV; + return 0; } diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 62aee900af3d..f8a56aae97a5 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -1128,8 +1128,6 @@ static int acpi_data_prop_read(const 
struct acpi_device_data *data, } break; } - if (nval == 0) - return -EINVAL; if (obj->type == ACPI_TYPE_BUFFER) { if (proptype != DEV_PROP_U8) @@ -1153,9 +1151,11 @@ static int acpi_data_prop_read(const struct acpi_device_data *data, ret = acpi_copy_property_array_uint(items, (u64 *)val, nval); break; case DEV_PROP_STRING: - ret = acpi_copy_property_array_string( - items, (char **)val, - min_t(u32, nval, obj->package.count)); + nval = min_t(u32, nval, obj->package.count); + if (nval == 0) + return -ENODATA; + + ret = acpi_copy_property_array_string(items, (char **)val, nval); break; default: ret = -EINVAL; diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index d57bc814dec4..dd513335f086 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -250,6 +250,9 @@ static bool acpi_decode_space(struct resource_win *win, switch (addr->resource_type) { case ACPI_MEMORY_RANGE: acpi_dev_memresource_flags(res, len, wp); + + if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY) + res->flags |= IORESOURCE_PREFETCH; break; case ACPI_IO_RANGE: acpi_dev_ioresource_flags(res, len, iodec, @@ -265,9 +268,6 @@ static bool acpi_decode_space(struct resource_win *win, if (addr->producer_consumer == ACPI_PRODUCER) res->flags |= IORESOURCE_WINDOW; - if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY) - res->flags |= IORESOURCE_PREFETCH; - return !(res->flags & IORESOURCE_DISABLED); } @@ -439,6 +439,20 @@ static const struct dmi_system_id asus_laptop[] = { DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"), }, }, + { + /* Asus Vivobook X1404VAP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "X1404VAP"), + }, + }, + { + /* Asus Vivobook X1504VAP */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_BOARD_NAME, "X1504VAP"), + }, + }, { /* Asus Vivobook X1704VAP */ .matches = { @@ -546,6 +560,12 @@ static const struct dmi_system_id maingear_laptop[] = { DMI_MATCH(DMI_BOARD_NAME, "RP-15"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."), + DMI_MATCH(DMI_BOARD_NAME, "MECH-17"), + }, + }, { /* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */ .matches = { @@ -615,6 +635,17 @@ static const struct dmi_system_id lg_laptop[] = { DMI_MATCH(DMI_BOARD_NAME, "GMxHGxx"), }, }, + { + /* + * TongFang GM5HG0A in case of the SKIKK Vanaheim relabel the + * board-name is changed, so check OEM strings instead. Note + * OEM string matches are always exact matches. 
+ * https://bugzilla.kernel.org/show_bug.cgi?id=219614 + */ + .matches = { + DMI_EXACT_MATCH(DMI_OEM_STRING, "GM5HG0A"), + }, + }, { } }; @@ -645,11 +676,11 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, for (i = 0; i < ARRAY_SIZE(override_table); i++) { const struct irq_override_cmp *entry = &override_table[i]; - if (dmi_check_system(entry->system) && - entry->irq == gsi && + if (entry->irq == gsi && entry->triggering == triggering && entry->polarity == polarity && - entry->shareable == shareable) + entry->shareable == shareable && + dmi_check_system(entry->system)) return entry->override; } diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 20f53ae4d204..a4b0a499b67d 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -592,6 +592,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9215), + .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235), diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 3f327ba759fd..586982a2a61f 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -1510,8 +1510,15 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev, tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; tf.command = ATA_CMD_PACKET; - /* is it pointless to prefer PIO for "safety reasons"? */ - if (ap->flags & ATA_FLAG_PIO_DMA) { + /* + * Do not use DMA if the connected device only supports PIO, even if the + * port prefers PIO commands via DMA. + * + * Ideally, we should call atapi_check_dma() to check if it is safe for + * the LLD to use DMA for REQUEST_SENSE, but we don't have a qc. + * Since we can't check the command, perhaps we should only use pio? + */ + if ((ap->flags & ATA_FLAG_PIO_DMA) && !(dev->flags & ATA_DFLAG_PIO)) { tf.protocol = ATAPI_PROT_DMA; tf.feature |= ATAPI_PKT_DMA; } else { diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 7916e369e15e..3601fd44430f 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -658,7 +658,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct page *page; - unsigned int offset; + unsigned int offset, count; if (!qc->cursg) { qc->curbytes = qc->nbytes; @@ -674,25 +674,27 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) page = nth_page(page, (offset >> PAGE_SHIFT)); offset %= PAGE_SIZE; - trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size); + /* don't overrun current sg */ + count = min(qc->cursg->length - qc->cursg_ofs, qc->sect_size); + + trace_ata_sff_pio_transfer_data(qc, offset, count); /* * Split the transfer when it splits a page boundary. Note that the * split still has to be dword aligned like all ATA data transfers. 
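A standalone model of the arithmetic in the ata_pio_sector() hunk here (continued just below); PAGE_SIZE and the sample sg length/offset are made-up numbers, not values from the patch. The byte count is first clamped to what remains in the current scatterlist entry and is then split at the page boundary, exactly as the comment describes.

#include <stdio.h>

#define PAGE_SIZE 4096u
#define SECT_SIZE 512u

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int sg_len = 300, sg_ofs = 0;	/* short sg entry */
	unsigned int offset = 4000;		/* dword-aligned offset in the page */

	/* don't overrun the current sg entry */
	unsigned int count = min_u(sg_len - sg_ofs, SECT_SIZE);	/* 300 */

	if (offset + count > PAGE_SIZE) {
		unsigned int split = PAGE_SIZE - offset;	/* 96 */
		printf("xfer %u bytes in page 0, %u bytes in page 1\n",
		       split, count - split);			/* 96 + 204 */
	} else {
		printf("xfer %u bytes in one page\n", count);
	}
	return 0;
}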
*/ WARN_ON_ONCE(offset % 4); - if (offset + qc->sect_size > PAGE_SIZE) { + if (offset + count > PAGE_SIZE) { unsigned int split_len = PAGE_SIZE - offset; ata_pio_xfer(qc, page, offset, split_len); - ata_pio_xfer(qc, nth_page(page, 1), 0, - qc->sect_size - split_len); + ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len); } else { - ata_pio_xfer(qc, page, offset, qc->sect_size); + ata_pio_xfer(qc, page, offset, count); } - qc->curbytes += qc->sect_size; - qc->cursg_ofs += qc->sect_size; + qc->curbytes += count; + qc->cursg_ofs += count; if (qc->cursg_ofs == qc->cursg->length) { qc->cursg = sg_next(qc->cursg); diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c index 985f42c4fd70..9113b74b2b67 100644 --- a/drivers/ata/pata_pxa.c +++ b/drivers/ata/pata_pxa.c @@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev) ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start, resource_size(cmd_res)); + if (!ap->ioaddr.cmd_addr) + return -ENOMEM; ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start, resource_size(ctl_res)); + if (!ap->ioaddr.ctl_addr) + return -ENOMEM; ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start, resource_size(dma_res)); + if (!ap->ioaddr.bmdma_addr) + return -ENOMEM; /* * Adjust register offsets diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c index dfbf9493e451..88a44543c598 100644 --- a/drivers/ata/sata_highbank.c +++ b/drivers/ata/sata_highbank.c @@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr) phy_nodes[phy] = phy_data.np; cphy_base[phy] = of_iomap(phy_nodes[phy], 0); if (cphy_base[phy] == NULL) { + of_node_put(phy_data.np); return 0; } phy_count += 1; diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c index fa1966638c06..c524634fd926 100644 --- a/drivers/ata/sata_sx4.c +++ b/drivers/ata/sata_sx4.c @@ -1118,9 +1118,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host) mmio += PDC_CHIP0_OFS; for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++) - pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, - pdc_i2c_read_data[i].reg, - &spd0[pdc_i2c_read_data[i].ofs]); + if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, + pdc_i2c_read_data[i].reg, + &spd0[pdc_i2c_read_data[i].ofs])) { + dev_err(host->dev, + "Failed in i2c read at index %d: device=%#x, reg=%#x\n", + i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg); + return -EIO; + } data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4); data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) | @@ -1285,6 +1290,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host) /* Programming DIMM0 Module Control Register (index_CID0:80h) */ size = pdc20621_prog_dimm0(host); + if (size < 0) + return size; dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size); /* Programming DIMM Module Global Control Register (index_CID0:88h) */ diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c index 6d309e4971b6..e243291a7e77 100644 --- a/drivers/auxdisplay/charlcd.c +++ b/drivers/auxdisplay/charlcd.c @@ -594,18 +594,19 @@ static int charlcd_init(struct charlcd *lcd) return 0; } -struct charlcd *charlcd_alloc(void) +struct charlcd *charlcd_alloc(unsigned int drvdata_size) { struct charlcd_priv *priv; struct charlcd *lcd; - priv = kzalloc(sizeof(*priv), GFP_KERNEL); + priv = kzalloc(sizeof(*priv) + drvdata_size, GFP_KERNEL); if (!priv) return NULL; priv->esc_seq.len = -1; lcd = &priv->lcd; + lcd->drvdata = priv->drvdata; return lcd; } diff --git 
a/drivers/auxdisplay/charlcd.h b/drivers/auxdisplay/charlcd.h index eed80063a6d2..4bbf106b2dd8 100644 --- a/drivers/auxdisplay/charlcd.h +++ b/drivers/auxdisplay/charlcd.h @@ -49,7 +49,7 @@ struct charlcd { unsigned long y; } addr; - void *drvdata; + void *drvdata; /* Set by charlcd_alloc() */ }; /** @@ -93,7 +93,8 @@ struct charlcd_ops { }; void charlcd_backlight(struct charlcd *lcd, enum charlcd_onoff on); -struct charlcd *charlcd_alloc(void); + +struct charlcd *charlcd_alloc(unsigned int drvdata_size); void charlcd_free(struct charlcd *lcd); int charlcd_register(struct charlcd *lcd); diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c index d56a5d508ccd..ebaf0ff518f4 100644 --- a/drivers/auxdisplay/hd44780.c +++ b/drivers/auxdisplay/hd44780.c @@ -226,7 +226,7 @@ static int hd44780_probe(struct platform_device *pdev) if (!hdc) return -ENOMEM; - lcd = charlcd_alloc(); + lcd = charlcd_alloc(0); if (!lcd) goto fail1; @@ -313,13 +313,13 @@ static int hd44780_probe(struct platform_device *pdev) fail3: kfree(hd); fail2: - kfree(lcd); + charlcd_free(lcd); fail1: kfree(hdc); return ret; } -static int hd44780_remove(struct platform_device *pdev) +static void hd44780_remove(struct platform_device *pdev) { struct charlcd *lcd = platform_get_drvdata(pdev); struct hd44780_common *hdc = lcd->drvdata; @@ -328,8 +328,7 @@ static int hd44780_remove(struct platform_device *pdev) kfree(hdc->hd44780); kfree(lcd->drvdata); - kfree(lcd); - return 0; + charlcd_free(lcd); } static const struct of_device_id hd44780_of_match[] = { @@ -340,7 +339,7 @@ MODULE_DEVICE_TABLE(of, hd44780_of_match); static struct platform_driver hd44780_driver = { .probe = hd44780_probe, - .remove = hd44780_remove, + .remove_new = hd44780_remove, .driver = { .name = "hd44780", .of_match_table = hd44780_of_match, diff --git a/drivers/auxdisplay/lcd2s.c b/drivers/auxdisplay/lcd2s.c index 135831a16514..2b597f226c0c 100644 --- a/drivers/auxdisplay/lcd2s.c +++ b/drivers/auxdisplay/lcd2s.c @@ -307,7 +307,7 @@ static int lcd2s_i2c_probe(struct i2c_client *i2c) if (err < 0) return err; - lcd = charlcd_alloc(); + lcd = charlcd_alloc(0); if (!lcd) return -ENOMEM; diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index eba04c0de7eb..0f3999b665e7 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -835,7 +835,7 @@ static void lcd_init(void) if (!hdc) return; - charlcd = charlcd_alloc(); + charlcd = charlcd_alloc(0); if (!charlcd) { kfree(hdc); return; diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 339a9edcde5f..941532ddfdc6 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -853,6 +853,8 @@ bus_devices_fail: bus_remove_file(bus, &bus_attr_uevent); bus_uevent_fail: kset_unregister(&bus->p->subsys); + /* Above kset_unregister() will kfree @bus->p */ + bus->p = NULL; out: kfree(bus->p); bus->p = NULL; diff --git a/drivers/base/core.c b/drivers/base/core.c index 30204e62497c..d985c4b87de5 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -137,7 +137,7 @@ static void __fwnode_link_del(struct fwnode_link *link) */ static void __fwnode_link_cycle(struct fwnode_link *link) { - pr_debug("%pfwf: Relaxing link with %pfwf\n", + pr_debug("%pfwf: cycle: depends on %pfwf\n", link->consumer, link->supplier); link->flags |= FWLINK_FLAG_CYCLE; } @@ -1082,7 +1082,8 @@ static struct fwnode_handle *fwnode_links_check_suppliers( return NULL; list_for_each_entry(link, &fwnode->suppliers, c_hook) - if (!(link->flags & FWLINK_FLAG_CYCLE)) + if (!(link->flags & + 
(FWLINK_FLAG_CYCLE | FWLINK_FLAG_IGNORE))) return link->supplier; return NULL; @@ -1932,10 +1933,10 @@ static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode) * * Return true if one or more cycles were found. Otherwise, return false. */ -static bool __fw_devlink_relax_cycles(struct device *con, +static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle, struct fwnode_handle *sup_handle) { - struct device *sup_dev = NULL, *par_dev = NULL; + struct device *sup_dev = NULL, *par_dev = NULL, *con_dev = NULL; struct fwnode_link *link; struct device_link *dev_link; bool ret = false; @@ -1952,27 +1953,31 @@ static bool __fw_devlink_relax_cycles(struct device *con, sup_handle->flags |= FWNODE_FLAG_VISITED; - sup_dev = get_dev_from_fwnode(sup_handle); - /* Termination condition. */ - if (sup_dev == con) { + if (sup_handle == con_handle) { + pr_debug("----- cycle: start -----\n"); ret = true; goto out; } + sup_dev = get_dev_from_fwnode(sup_handle); + con_dev = get_dev_from_fwnode(con_handle); /* * If sup_dev is bound to a driver and @con hasn't started binding to a * driver, sup_dev can't be a consumer of @con. So, no need to check * further. */ if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND && - con->links.status == DL_DEV_NO_DRIVER) { + con_dev && con_dev->links.status == DL_DEV_NO_DRIVER) { ret = false; goto out; } list_for_each_entry(link, &sup_handle->suppliers, c_hook) { - if (__fw_devlink_relax_cycles(con, link->supplier)) { + if (link->flags & FWLINK_FLAG_IGNORE) + continue; + + if (__fw_devlink_relax_cycles(con_handle, link->supplier)) { __fwnode_link_cycle(link); ret = true; } @@ -1987,8 +1992,11 @@ static bool __fw_devlink_relax_cycles(struct device *con, else par_dev = fwnode_get_next_parent_dev(sup_handle); - if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode)) + if (par_dev && __fw_devlink_relax_cycles(con_handle, par_dev->fwnode)) { + pr_debug("%pfwf: cycle: child of %pfwf\n", sup_handle, + par_dev->fwnode); ret = true; + } if (!sup_dev) goto out; @@ -2002,8 +2010,10 @@ static bool __fw_devlink_relax_cycles(struct device *con, !(dev_link->flags & DL_FLAG_CYCLE)) continue; - if (__fw_devlink_relax_cycles(con, + if (__fw_devlink_relax_cycles(con_handle, dev_link->supplier->fwnode)) { + pr_debug("%pfwf: cycle: depends on %pfwf\n", sup_handle, + dev_link->supplier->fwnode); fw_devlink_relax_link(dev_link); dev_link->flags |= DL_FLAG_CYCLE; ret = true; @@ -2013,6 +2023,7 @@ static bool __fw_devlink_relax_cycles(struct device *con, out: sup_handle->flags &= ~FWNODE_FLAG_VISITED; put_device(sup_dev); + put_device(con_dev); put_device(par_dev); return ret; } @@ -2045,10 +2056,8 @@ static int fw_devlink_create_devlink(struct device *con, int ret = 0; u32 flags; - if (con->fwnode == link->consumer) - flags = fw_devlink_get_flags(link->flags); - else - flags = FW_DEVLINK_FLAGS_PERMISSIVE; + if (link->flags & FWLINK_FLAG_IGNORE) + return 0; /* * In some cases, a device P might also be a supplier to its child node @@ -2070,24 +2079,23 @@ static int fw_devlink_create_devlink(struct device *con, return -EINVAL; /* - * SYNC_STATE_ONLY device links don't block probing and supports cycles. - * So, one might expect that cycle detection isn't necessary for them. - * However, if the device link was marked as SYNC_STATE_ONLY because - * it's part of a cycle, then we still need to do cycle detection. This - * is because the consumer and supplier might be part of multiple cycles - * and we need to detect all those cycles. 
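The fw_devlink changes above walk the fwnode/device-link graph recursively, marking each supplier VISITED on entry to cut off re-entry and reporting true whenever the walk reaches the consumer it started from. A stripped-down model of that shape (plain NULL-terminated adjacency arrays instead of fwnode links and device links; every name is an illustrative assumption):

#include <stdbool.h>
#include <stddef.h>

struct node {
	bool visited;
	struct node *suppliers[4];	/* NULL-terminated for the sketch */
};

/* Return true if following supplier edges from sup leads back to con,
 * i.e. a con -> sup link would close one or more dependency cycles. */
static bool relax_cycles(struct node *con, struct node *sup)
{
	bool ret = false;
	size_t i;

	if (!sup || sup->visited)
		return false;
	if (sup == con)
		return true;		/* termination: back at the consumer */

	sup->visited = true;
	for (i = 0; i < 4 && sup->suppliers[i]; i++) {
		/* keep scanning even after a hit: there may be several cycles */
		if (relax_cycles(con, sup->suppliers[i]))
			ret = true;
	}
	sup->visited = false;

	return ret;
}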
+ * Don't try to optimize by not calling the cycle detection logic under + * certain conditions. There's always some corner case that won't get + * detected. */ - if (!device_link_flag_is_sync_state_only(flags) || - flags & DL_FLAG_CYCLE) { - device_links_write_lock(); - if (__fw_devlink_relax_cycles(con, sup_handle)) { - __fwnode_link_cycle(link); - flags = fw_devlink_get_flags(link->flags); - dev_info(con, "Fixed dependency cycle(s) with %pfwf\n", - sup_handle); - } - device_links_write_unlock(); + device_links_write_lock(); + if (__fw_devlink_relax_cycles(link->consumer, sup_handle)) { + __fwnode_link_cycle(link); + pr_debug("----- cycle: end -----\n"); + pr_info("%pfwf: Fixed dependency cycle(s) with %pfwf\n", + link->consumer, sup_handle); } + device_links_write_unlock(); + + if (con->fwnode == link->consumer) + flags = fw_devlink_get_flags(link->flags); + else + flags = FW_DEVLINK_FLAGS_PERMISSIVE; if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE) sup_dev = fwnode_get_next_parent_dev(sup_handle); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 31da94afe4f3..27aff2503765 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -595,6 +595,12 @@ ssize_t __weak cpu_show_reg_file_data_sampling(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_indirect_target_selection(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -609,6 +615,7 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); +static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -625,6 +632,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_gather_data_sampling.attr, &dev_attr_spec_rstack_overflow.attr, &dev_attr_reg_file_data_sampling.attr, + &dev_attr_indirect_target_selection.attr, NULL }; diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 35d1e2864696..9d0ea5c14bc5 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -684,6 +684,13 @@ int devres_release_group(struct device *dev, void *id) spin_unlock_irqrestore(&dev->devres_lock, flags); release_nodes(dev, &todo); + } else if (list_empty(&dev->devres_head)) { + /* + * dev is probably dying via devres_release_all(): groups + * have already been removed and are on the process of + * being released - don't touch and don't warn. 
+ */ + spin_unlock_irqrestore(&dev->devres_lock, flags); } else { WARN_ON(1); spin_unlock_irqrestore(&dev->devres_lock, flags); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index e01bb359034b..3c44b0313a10 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -2012,6 +2012,7 @@ free: static void genpd_free_data(struct generic_pm_domain *genpd) { + put_device(&genpd->dev); if (genpd_is_cpu_domain(genpd)) free_cpumask_var(genpd->cpus); if (genpd->free_states) diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 9c5a5f4dba5a..343d3c966e7a 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -894,6 +894,9 @@ static void __device_resume(struct device *dev, pm_message_t state, bool async) if (dev->power.syscore) goto Complete; + if (!dev->power.is_suspended) + goto Complete; + if (dev->power.direct_complete) { /* Match the pm_runtime_disable() in __device_suspend(). */ pm_runtime_enable(dev); @@ -912,9 +915,6 @@ static void __device_resume(struct device *dev, pm_message_t state, bool async) */ dev->power.is_prepared = false; - if (!dev->power.is_suspended) - goto Unlock; - if (dev->pm_domain) { info = "power domain "; callback = pm_op(&dev->pm_domain->ops, state); @@ -954,7 +954,6 @@ static void __device_resume(struct device *dev, pm_message_t state, bool async) error = dpm_run_callback(callback, dev, state, info); dev->power.is_suspended = false; - Unlock: device_unlock(dev); dpm_watchdog_clear(&wd); @@ -1236,14 +1235,13 @@ Skip: dev->power.is_noirq_suspended = true; /* - * Skipping the resume of devices that were in use right before the - * system suspend (as indicated by their PM-runtime usage counters) - * would be suboptimal. Also resume them if doing that is not allowed - * to be skipped. + * Devices must be resumed unless they are explicitly allowed to be left + * in suspend, but even in that case skipping the resume of devices that + * were in use right before the system suspend (as indicated by their + * runtime PM usage counters and child counters) would be suboptimal. */ - if (atomic_read(&dev->power.usage_count) > 1 || - !(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && - dev->power.may_skip_resume)) + if (!(dev_pm_test_driver_flags(dev, DPM_FLAG_MAY_SKIP_RESUME) && + dev->power.may_skip_resume) || !pm_runtime_need_not_resume(dev)) dev->power.must_resume = true; if (dev->power.must_resume) @@ -1639,6 +1637,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) pm_runtime_disable(dev); if (pm_runtime_status_suspended(dev)) { pm_dev_dbg(dev, state, "direct-complete "); + dev->power.is_suspended = true; goto Complete; } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 14088b5adb55..bb68cba4d85a 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1840,7 +1840,7 @@ void pm_runtime_drop_link(struct device_link *link) pm_request_idle(link->supplier); } -static bool pm_runtime_need_not_resume(struct device *dev) +bool pm_runtime_need_not_resume(struct device *dev) { return atomic_read(&dev->power.usage_count) <= 1 && (atomic_read(&dev->power.child_count) == 0 || diff --git a/drivers/base/property.c b/drivers/base/property.c index eb9b01c2ff1d..ba612087a278 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -790,7 +790,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_available_child_node); * fwnode pointer. Note that this function also puts a reference to @child * unconditionally. 
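Since the kernel-doc above stresses that the reference on @child is always dropped, a caller only has to manage the reference of a child it actually keeps; plain iteration needs no explicit put. The helper below is a hypothetical illustration of that contract, not part of the patch:

#include <linux/property.h>

static struct fwnode_handle *first_child_with_prop(const struct device *dev,
						   const char *propname)
{
	struct fwnode_handle *child = NULL;

	while ((child = device_get_next_child_node(dev, child))) {
		if (fwnode_property_present(child, propname))
			return child;	/* caller must fwnode_handle_put() this one */
	}
	return NULL;			/* all intermediate references already dropped */
}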
*/ -struct fwnode_handle *device_get_next_child_node(struct device *dev, +struct fwnode_handle *device_get_next_child_node(const struct device *dev, struct fwnode_handle *child) { const struct fwnode_handle *fwnode = dev_fwnode(dev); @@ -833,7 +833,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_named_child_node); * The caller is responsible for calling fwnode_handle_put() on the returned * fwnode pointer. */ -struct fwnode_handle *device_get_named_child_node(struct device *dev, +struct fwnode_handle *device_get_named_child_node(const struct device *dev, const char *childname) { return fwnode_get_named_child_node(dev_fwnode(dev), childname); @@ -899,7 +899,7 @@ EXPORT_SYMBOL_GPL(fwnode_device_is_available); * * Return: the number of child nodes for a given device. */ -unsigned int device_get_child_node_count(struct device *dev) +unsigned int device_get_child_node_count(const struct device *dev) { struct fwnode_handle *child; unsigned int count = 0; diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 18fc1c436081..dfe0da4ed47e 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -591,12 +591,16 @@ exit: return IRQ_NONE; } +static struct lock_class_key regmap_irq_lock_class; +static struct lock_class_key regmap_irq_request_class; + static int regmap_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct regmap_irq_chip_data *data = h->host_data; irq_set_chip_data(virq, data); + irq_set_lockdep_class(virq, ®map_irq_lock_class, ®map_irq_request_class); irq_set_chip(virq, &data->irq_chip); irq_set_nested_thread(virq, 1); irq_set_parent(virq, data->irq); @@ -1055,6 +1059,7 @@ err_alloc: kfree(d->wake_buf); kfree(d->mask_buf_def); kfree(d->mask_buf); + kfree(d->main_status_buf); kfree(d->status_buf); kfree(d->status_reg_buf); if (d->virt_buf) { @@ -1135,6 +1140,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) kfree(d->wake_buf); kfree(d->mask_buf_def); kfree(d->mask_buf); + kfree(d->main_status_buf); kfree(d->status_reg_buf); kfree(d->status_buf); if (d->config_buf) { diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 140af27f591a..f0e314abcafc 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -652,6 +652,17 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, } EXPORT_SYMBOL_GPL(regmap_attach_dev); +static int dev_get_regmap_match(struct device *dev, void *res, void *data); + +static int regmap_detach_dev(struct device *dev, struct regmap *map) +{ + if (!dev) + return 0; + + return devres_release(dev, dev_get_regmap_release, + dev_get_regmap_match, (void *)map->name); +} + static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus, const struct regmap_config *config) { @@ -1144,13 +1155,13 @@ skip_format_initialization: /* Sanity check */ if (range_cfg->range_max < range_cfg->range_min) { - dev_err(map->dev, "Invalid range %d: %d < %d\n", i, + dev_err(map->dev, "Invalid range %d: %u < %u\n", i, range_cfg->range_max, range_cfg->range_min); goto err_range; } if (range_cfg->range_max > map->max_register) { - dev_err(map->dev, "Invalid range %d: %d > %d\n", i, + dev_err(map->dev, "Invalid range %d: %u > %u\n", i, range_cfg->range_max, map->max_register); goto err_range; } @@ -1536,6 +1547,7 @@ void regmap_exit(struct regmap *map) { struct regmap_async *async; + regmap_detach_dev(map->dev, map); regcache_exit(map); regmap_debugfs_exit(map); regmap_range_exit(map); diff --git a/drivers/base/topology.c 
b/drivers/base/topology.c index 89f98be5c5b9..d293cbd253e4 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -27,9 +27,17 @@ static ssize_t name##_read(struct file *file, struct kobject *kobj, \ loff_t off, size_t count) \ { \ struct device *dev = kobj_to_dev(kobj); \ + cpumask_var_t mask; \ + ssize_t n; \ \ - return cpumap_print_bitmask_to_buf(buf, topology_##mask(dev->id), \ - off, count); \ + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \ + return -ENOMEM; \ + \ + cpumask_copy(mask, topology_##mask(dev->id)); \ + n = cpumap_print_bitmask_to_buf(buf, mask, off, count); \ + free_cpumask_var(mask); \ + \ + return n; \ } \ \ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \ @@ -37,9 +45,17 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \ loff_t off, size_t count) \ { \ struct device *dev = kobj_to_dev(kobj); \ + cpumask_var_t mask; \ + ssize_t n; \ \ - return cpumap_print_list_to_buf(buf, topology_##mask(dev->id), \ - off, count); \ + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) \ + return -ENOMEM; \ + \ + cpumask_copy(mask, topology_##mask(dev->id)); \ + n = cpumap_print_list_to_buf(buf, mask, off, count); \ + free_cpumask_var(mask); \ + \ + return n; \ } define_id_show_func(physical_package_id, "%d"); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index a8a77a1efe1e..b2c109f08808 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -368,8 +368,40 @@ __setup("ramdisk_size=", ramdisk_size); * (should share code eventually). */ static LIST_HEAD(brd_devices); +static DEFINE_MUTEX(brd_devices_mutex); static struct dentry *brd_debugfs_dir; +static struct brd_device *brd_find_or_alloc_device(int i) +{ + struct brd_device *brd; + + mutex_lock(&brd_devices_mutex); + list_for_each_entry(brd, &brd_devices, brd_list) { + if (brd->brd_number == i) { + mutex_unlock(&brd_devices_mutex); + return ERR_PTR(-EEXIST); + } + } + + brd = kzalloc(sizeof(*brd), GFP_KERNEL); + if (!brd) { + mutex_unlock(&brd_devices_mutex); + return ERR_PTR(-ENOMEM); + } + brd->brd_number = i; + list_add_tail(&brd->brd_list, &brd_devices); + mutex_unlock(&brd_devices_mutex); + return brd; +} + +static void brd_free_device(struct brd_device *brd) +{ + mutex_lock(&brd_devices_mutex); + list_del(&brd->brd_list); + mutex_unlock(&brd_devices_mutex); + kfree(brd); +} + static int brd_alloc(int i) { struct brd_device *brd; @@ -377,14 +409,9 @@ static int brd_alloc(int i) char buf[DISK_NAME_LEN]; int err = -ENOMEM; - list_for_each_entry(brd, &brd_devices, brd_list) - if (brd->brd_number == i) - return -EEXIST; - brd = kzalloc(sizeof(*brd), GFP_KERNEL); - if (!brd) - return -ENOMEM; - brd->brd_number = i; - list_add_tail(&brd->brd_list, &brd_devices); + brd = brd_find_or_alloc_device(i); + if (IS_ERR(brd)) + return PTR_ERR(brd); spin_lock_init(&brd->brd_lock); INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); @@ -428,8 +455,7 @@ static int brd_alloc(int i) out_cleanup_disk: put_disk(disk); out_free_dev: - list_del(&brd->brd_list); - kfree(brd); + brd_free_device(brd); return err; } @@ -448,8 +474,7 @@ static void brd_cleanup(void) del_gendisk(brd->brd_disk); put_disk(brd->brd_disk); brd_free_pages(brd); - list_del(&brd->brd_list); - kfree(brd); + brd_free_device(brd); } } @@ -476,16 +501,6 @@ static int __init brd_init(void) { int err, i; - brd_check_and_reset_par(); - - brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL); - - for (i = 0; i < rd_nr; i++) { - err = brd_alloc(i); - if (err) - goto out_free; - } - /* * brd module now has a feature to 
instantiate underlying device * structure on-demand, provided that there is an access dev node. @@ -501,11 +516,18 @@ static int __init brd_init(void) * dynamically. */ + brd_check_and_reset_par(); + + brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL); + if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) { err = -EIO; goto out_free; } + for (i = 0; i < rd_nr; i++) + brd_alloc(i); + pr_info("brd: module loaded\n"); return 0; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 041d307a2f28..e3e5d533a7ec 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -441,7 +441,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, cmd->iocb.ki_filp = file; cmd->iocb.ki_complete = lo_rw_aio_complete; cmd->iocb.ki_flags = IOCB_DIRECT; - cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); + cmd->iocb.ki_ioprio = req_get_ioprio(rq); if (rw == ITER_SOURCE) ret = call_write_iter(file, &cmd->iocb, &iter); @@ -624,19 +624,20 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, * dependency. */ fput(old_file); + dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); if (partscan) loop_reread_partitions(lo); error = 0; done: - /* enable and uncork uevent now that we are done */ - dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); + kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); return error; out_err: loop_global_unlock(lo, is_loop); out_putf: fput(file); + dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); goto done; } @@ -1104,8 +1105,8 @@ static int loop_configure(struct loop_device *lo, fmode_t mode, if (partscan) clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); - /* enable and uncork uevent now that we are done */ dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); + kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); loop_global_unlock(lo, is_loop); if (partscan) diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 1f3cd5de4117..7f6ef0a2b4a5 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -2133,6 +2133,7 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd) flush_workqueue(nbd->recv_workq); nbd_clear_que(nbd); nbd->task_setup = NULL; + clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags); mutex_unlock(&nbd->config_lock); if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF, diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 4d78b5583dc6..e66cace433cb 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -392,13 +392,25 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev, static int nullb_apply_submit_queues(struct nullb_device *dev, unsigned int submit_queues) { - return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues); + int ret; + + mutex_lock(&lock); + ret = nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues); + mutex_unlock(&lock); + + return ret; } static int nullb_apply_poll_queues(struct nullb_device *dev, unsigned int poll_queues) { - return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues); + int ret; + + mutex_lock(&lock); + ret = nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues); + mutex_unlock(&lock); + + return ret; } NULLB_DEVICE_ATTR(size, ulong, NULL); @@ -444,28 +456,32 @@ static ssize_t nullb_device_power_store(struct config_item *item, if (ret < 0) return ret; + ret = count; + mutex_lock(&lock); if (!dev->power && newp) { if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) - return count; + goto out; + ret = 
null_add_dev(dev); if (ret) { clear_bit(NULLB_DEV_FL_UP, &dev->flags); - return ret; + goto out; } set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); dev->power = newp; + ret = count; } else if (dev->power && !newp) { if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { - mutex_lock(&lock); dev->power = newp; null_del_dev(dev->nullb); - mutex_unlock(&lock); } clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); } - return count; +out: + mutex_unlock(&lock); + return ret; } CONFIGFS_ATTR(nullb_device_, power); @@ -1764,7 +1780,7 @@ static void null_del_dev(struct nullb *nullb) dev = nullb->dev; - ida_simple_remove(&nullb_indexes, nullb->index); + ida_free(&nullb_indexes, nullb->index); list_del_init(&nullb->list); @@ -2102,15 +2118,12 @@ static int null_add_dev(struct nullb_device *dev) blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); - mutex_lock(&lock); - rv = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); - if (rv < 0) { - mutex_unlock(&lock); + rv = ida_alloc(&nullb_indexes, GFP_KERNEL); + if (rv < 0) goto out_cleanup_zone; - } + nullb->index = rv; dev->index = rv; - mutex_unlock(&lock); blk_queue_logical_block_size(nullb->q, dev->blocksize); blk_queue_physical_block_size(nullb->q, dev->blocksize); @@ -2134,9 +2147,7 @@ static int null_add_dev(struct nullb_device *dev) if (rv) goto out_ida_free; - mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); - mutex_unlock(&lock); pr_info("disk %s created\n", nullb->disk_name); @@ -2185,7 +2196,9 @@ static int null_create_dev(void) if (!dev) return -ENOMEM; + mutex_lock(&lock); ret = null_add_dev(dev); + mutex_unlock(&lock); if (ret) { null_free_dev(dev); return ret; diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index bfd643856f64..f2a99e5d304d 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -325,12 +325,21 @@ static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id) return ublk_get_queue(ub, q_id)->io_cmd_buf; } +static inline int __ublk_queue_cmd_buf_size(int depth) +{ + return round_up(depth * sizeof(struct ublksrv_io_desc), PAGE_SIZE); +} + static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id) { struct ublk_queue *ubq = ublk_get_queue(ub, q_id); - return round_up(ubq->q_depth * sizeof(struct ublksrv_io_desc), - PAGE_SIZE); + return __ublk_queue_cmd_buf_size(ubq->q_depth); +} + +static int ublk_max_cmd_buf_size(void) +{ + return __ublk_queue_cmd_buf_size(UBLK_MAX_QUEUE_DEPTH); } static inline bool ublk_queue_can_use_recovery_reissue( @@ -930,7 +939,7 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma) { struct ublk_device *ub = filp->private_data; size_t sz = vma->vm_end - vma->vm_start; - unsigned max_sz = UBLK_MAX_QUEUE_DEPTH * sizeof(struct ublksrv_io_desc); + unsigned max_sz = ublk_max_cmd_buf_size(); unsigned long pfn, end, phys_off = vma->vm_pgoff << PAGE_SHIFT; int q_id, ret = 0; @@ -1864,9 +1873,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub, if (ph.len > sizeof(struct ublk_params)) ph.len = sizeof(struct ublk_params); - /* parameters can only be changed when device isn't live */ mutex_lock(&ub->mutex); - if (ub->dev_info.state == UBLK_S_DEV_LIVE) { + if (test_bit(UB_STATE_USED, &ub->state)) { + /* + * Parameters can only be changed when device hasn't + * been started yet + */ ret = -EACCES; } else if (copy_from_user(&ub->params, argp, ph.len)) { ret = -EFAULT; @@ -2046,7 +2058,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, ret = 
ublk_ctrl_end_recovery(ub, cmd); break; default: - ret = -ENOTSUPP; + ret = -EOPNOTSUPP; break; } if (ub) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 505026f0025c..0ba56caa41ef 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -399,18 +399,18 @@ static bool virtblk_prep_rq_batch(struct request *req) return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK; } -static bool virtblk_add_req_batch(struct virtio_blk_vq *vq, +static void virtblk_add_req_batch(struct virtio_blk_vq *vq, struct request **rqlist) { + struct request *req; unsigned long flags; - int err; bool kick; spin_lock_irqsave(&vq->lock, flags); - while (!rq_list_empty(*rqlist)) { - struct request *req = rq_list_pop(rqlist); + while ((req = rq_list_pop(rqlist))) { struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); + int err; err = virtblk_add_req(vq->vq, vbr); if (err) { @@ -423,37 +423,33 @@ static bool virtblk_add_req_batch(struct virtio_blk_vq *vq, kick = virtqueue_kick_prepare(vq->vq); spin_unlock_irqrestore(&vq->lock, flags); - return kick; + if (kick) + virtqueue_notify(vq->vq); } static void virtio_queue_rqs(struct request **rqlist) { - struct request *req, *next, *prev = NULL; + struct request *submit_list = NULL; struct request *requeue_list = NULL; + struct request **requeue_lastp = &requeue_list; + struct virtio_blk_vq *vq = NULL; + struct request *req; - rq_list_for_each_safe(rqlist, req, next) { - struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx); - bool kick; + while ((req = rq_list_pop(rqlist))) { + struct virtio_blk_vq *this_vq = get_virtio_blk_vq(req->mq_hctx); - if (!virtblk_prep_rq_batch(req)) { - rq_list_move(rqlist, &requeue_list, req, prev); - req = prev; - if (!req) - continue; - } + if (vq && vq != this_vq) + virtblk_add_req_batch(vq, &submit_list); + vq = this_vq; - if (!next || req->mq_hctx != next->mq_hctx) { - req->rq_next = NULL; - kick = virtblk_add_req_batch(vq, rqlist); - if (kick) - virtqueue_notify(vq->vq); - - *rqlist = next; - prev = NULL; - } else - prev = req; + if (virtblk_prep_rq_batch(req)) + rq_list_add(&submit_list, req); /* reverse order */ + else + rq_list_add_tail(&requeue_lastp, req); } + if (vq) + virtblk_add_req_batch(vq, &submit_list); *rqlist = requeue_list; } @@ -1205,9 +1201,12 @@ static void virtblk_remove(struct virtio_device *vdev) static int virtblk_freeze(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; + struct request_queue *q = vblk->disk->queue; /* Ensure no requests in virtqueues before deleting vqs. 
*/ - blk_mq_freeze_queue(vblk->disk->queue); + blk_mq_freeze_queue(q); + blk_mq_quiesce_queue_nowait(q); + blk_mq_unfreeze_queue(q); /* Ensure we don't receive any more interrupts */ virtio_reset_device(vdev); @@ -1231,8 +1230,8 @@ static int virtblk_restore(struct virtio_device *vdev) return ret; virtio_device_ready(vdev); + blk_mq_unquiesce_queue(vblk->disk->queue); - blk_mq_unfreeze_queue(vblk->disk->queue); return 0; } #endif diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 966aab902d19..bef5388e4e28 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -530,6 +530,12 @@ static ssize_t backing_dev_store(struct device *dev, } nr_pages = i_size_read(inode) >> PAGE_SHIFT; + /* Refuse to use zero sized device (also prevents self reference) */ + if (!nr_pages) { + err = -EINVAL; + goto out; + } + bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long); bitmap = kvzalloc(bitmap_sz, GFP_KERNEL); if (!bitmap) { @@ -1162,12 +1168,16 @@ static void zram_meta_free(struct zram *zram, u64 disksize) size_t num_pages = disksize >> PAGE_SHIFT; size_t index; + if (!zram->table) + return; + /* Free all pages that are still in this zram device */ for (index = 0; index < num_pages; index++) zram_free_page(zram, index); zs_destroy_pool(zram->mem_pool); vfree(zram->table); + zram->table = NULL; } static bool zram_meta_alloc(struct zram *zram, u64 disksize) @@ -1182,6 +1192,7 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize) zram->mem_pool = zs_create_pool(zram->disk->disk_name); if (!zram->mem_pool) { vfree(zram->table); + zram->table = NULL; return false; } @@ -1716,11 +1727,6 @@ static void zram_reset_device(struct zram *zram) zram->limit_pages = 0; - if (!init_done(zram)) { - up_write(&zram->init_lock); - return; - } - set_capacity_and_notify(zram->disk, 0); part_stat_set_all(zram->disk->part0, 0); @@ -1728,7 +1734,8 @@ static void zram_reset_device(struct zram *zram) zram_meta_free(zram, zram->disksize); zram->disksize = 0; memset(&zram->stats, 0, sizeof(zram->stats)); - zcomp_destroy(zram->comp); + if (zram->comp) + zcomp_destroy(zram->comp); zram->comp = NULL; reset_bdev(zram); diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index 35fb26cbf229..5651f40db173 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -289,6 +289,39 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) } EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd); +static bool qca_filename_has_extension(const char *filename) +{ + const char *suffix = strrchr(filename, '.'); + + /* File extensions require a dot, but not as the first or last character */ + if (!suffix || suffix == filename || *(suffix + 1) == '\0') + return 0; + + /* Avoid matching directories with names that look like files with extensions */ + return !strchr(suffix, '/'); +} + +static bool qca_get_alt_nvm_file(char *filename, size_t max_size) +{ + char fwname[64]; + const char *suffix; + + /* nvm file name has an extension, replace with .bin */ + if (qca_filename_has_extension(filename)) { + suffix = strrchr(filename, '.'); + strscpy(fwname, filename, suffix - filename + 1); + snprintf(fwname + (suffix - filename), + sizeof(fwname) - (suffix - filename), ".bin"); + /* If nvm file is already the default one, return false to skip the retry. 
*/ + if (strcmp(fwname, filename) == 0) + return false; + + snprintf(filename, max_size, "%s", fwname); + return true; + } + return false; +} + static int qca_tlv_check_data(struct hci_dev *hdev, struct qca_fw_config *config, u8 *fw_data, size_t fw_size, @@ -586,6 +619,19 @@ static int qca_download_firmware(struct hci_dev *hdev, config->fwname, ret); return ret; } + } + /* If the board-specific file is missing, try loading the default + * one, unless that was attempted already. + */ + else if (config->type == TLV_TYPE_NVM && + qca_get_alt_nvm_file(config->fwname, sizeof(config->fwname))) { + bt_dev_info(hdev, "QCA Downloading %s", config->fwname); + ret = request_firmware(&fw, config->fwname, &hdev->dev); + if (ret) { + bt_dev_err(hdev, "QCA Failed to request file: %s (%d)", + config->fwname, ret); + return ret; + } } else { bt_dev_err(hdev, "QCA Failed to request file: %s (%d)", config->fwname, ret); @@ -722,21 +768,38 @@ static int qca_check_bdaddr(struct hci_dev *hdev, const struct qca_fw_config *co return 0; } -static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size, +static void qca_get_nvm_name_by_board(char *fwname, size_t max_size, + const char *stem, enum qca_btsoc_type soc_type, struct qca_btsoc_version ver, u8 rom_ver, u16 bid) { const char *variant; + const char *prefix; - /* hsp gf chip */ - if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID) - variant = "g"; - else - variant = ""; + /* Set the default value to variant and prefix */ + variant = ""; + prefix = "b"; - if (bid == 0x0) - snprintf(fwname, max_size, "qca/hpnv%02x%s.bin", rom_ver, variant); - else - snprintf(fwname, max_size, "qca/hpnv%02x%s.%x", rom_ver, variant, bid); + if (soc_type == QCA_QCA2066) + prefix = ""; + + if (soc_type == QCA_WCN6855 || soc_type == QCA_QCA2066) { + /* If the chip is manufactured by GlobalFoundries */ + if ((le32_to_cpu(ver.soc_id) & QCA_HSP_GF_SOC_MASK) == QCA_HSP_GF_SOC_ID) + variant = "g"; + } + + if (rom_ver != 0) { + if (bid == 0x0 || bid == 0xffff) + snprintf(fwname, max_size, "qca/%s%02x%s.bin", stem, rom_ver, variant); + else + snprintf(fwname, max_size, "qca/%s%02x%s.%s%02x", stem, rom_ver, + variant, prefix, bid); + } else { + if (bid == 0x0 || bid == 0xffff) + snprintf(fwname, max_size, "qca/%s%s.bin", stem, variant); + else + snprintf(fwname, max_size, "qca/%s%s.%s%02x", stem, variant, prefix, bid); + } } int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, @@ -744,6 +807,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, const char *firmware_name) { struct qca_fw_config config = {}; + const char *variant = ""; int err; u8 rom_ver = 0; u32 soc_ver; @@ -819,34 +883,39 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, /* Give the controller some time to get ready to receive the NVM */ msleep(10); - if (soc_type == QCA_QCA2066) + if (soc_type == QCA_QCA2066 || soc_type == QCA_WCN7850) qca_read_fw_board_id(hdev, &boardid); /* Download NVM configuration */ config.type = TLV_TYPE_NVM; if (firmware_name) { - snprintf(config.fwname, sizeof(config.fwname), - "qca/%s", firmware_name); + /* The firmware name has an extension, use it directly */ + if (qca_filename_has_extension(firmware_name)) { + snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name); + } else { + qca_read_fw_board_id(hdev, &boardid); + qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname), + firmware_name, soc_type, ver, 0, boardid); + } } else { switch (soc_type) { case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: - if 
(le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) { - snprintf(config.fwname, sizeof(config.fwname), - "qca/crnv%02xu.bin", rom_ver); - } else { - snprintf(config.fwname, sizeof(config.fwname), - "qca/crnv%02x.bin", rom_ver); - } + if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) + variant = "u"; + + snprintf(config.fwname, sizeof(config.fwname), + "qca/crnv%02x%s.bin", rom_ver, variant); break; case QCA_WCN3988: snprintf(config.fwname, sizeof(config.fwname), "qca/apnv%02x.bin", rom_ver); break; case QCA_QCA2066: - qca_generate_hsp_nvm_name(config.fwname, - sizeof(config.fwname), ver, rom_ver, boardid); + qca_get_nvm_name_by_board(config.fwname, + sizeof(config.fwname), "hpnv", soc_type, ver, + rom_ver, boardid); break; case QCA_QCA6390: snprintf(config.fwname, sizeof(config.fwname), @@ -857,14 +926,14 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, "qca/msnv%02x.bin", rom_ver); break; case QCA_WCN6855: - snprintf(config.fwname, sizeof(config.fwname), - "qca/hpnv%02x.bin", rom_ver); + qca_read_fw_board_id(hdev, &boardid); + qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname), + "hpnv", soc_type, ver, rom_ver, boardid); break; case QCA_WCN7850: - snprintf(config.fwname, sizeof(config.fwname), - "qca/hmtnv%02x.bin", rom_ver); + qca_get_nvm_name_by_board(config.fwname, sizeof(config.fwname), + "hmtnv", soc_type, ver, rom_ver, boardid); break; - default: snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin", soc_ver); diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 5671f0d9ab28..1c122613e562 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -817,6 +817,8 @@ out_free: rtl_dev_err(hdev, "mandatory config file %s not found", btrtl_dev->ic_info->cfg_name); ret = btrtl_dev->cfg_len; + if (!ret) + ret = -EINVAL; goto err_free; } } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 0b915f2c7607..25adb3ac40eb 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -549,6 +549,8 @@ static const struct usb_device_id blacklist_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, @@ -3767,6 +3769,7 @@ static ssize_t force_poll_sync_write(struct file *file, } static const struct file_operations force_poll_sync_fops = { + .owner = THIS_MODULE, .open = simple_open, .read = force_poll_sync_read, .write = force_poll_sync_write, diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 60d22bbde8fa..2b89314e56ff 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -102,7 +102,8 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) if (!skb) { percpu_down_read(&hu->proto_lock); - if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + if (test_bit(HCI_UART_PROTO_READY, &hu->flags) || + test_bit(HCI_UART_PROTO_INIT, &hu->flags)) skb = hu->proto->dequeue(hu); percpu_up_read(&hu->proto_lock); @@ -124,7 +125,8 @@ int hci_uart_tx_wakeup(struct hci_uart *hu) if (!percpu_down_read_trylock(&hu->proto_lock)) return 0; - if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) && + !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) goto no_schedule; set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); @@ -283,7 +285,8 @@ static int hci_uart_send_frame(struct hci_dev 
*hdev, struct sk_buff *skb) percpu_down_read(&hu->proto_lock); - if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) && + !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) { percpu_up_read(&hu->proto_lock); return -EUNATCH; } @@ -587,7 +590,8 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty) if (tty != hu->tty) return; - if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) + if (test_bit(HCI_UART_PROTO_READY, &hu->flags) || + test_bit(HCI_UART_PROTO_INIT, &hu->flags)) hci_uart_tx_wakeup(hu); } @@ -613,7 +617,8 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, percpu_down_read(&hu->proto_lock); - if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { + if (!test_bit(HCI_UART_PROTO_READY, &hu->flags) && + !test_bit(HCI_UART_PROTO_INIT, &hu->flags)) { percpu_up_read(&hu->proto_lock); return; } @@ -714,12 +719,16 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id) hu->proto = p; + set_bit(HCI_UART_PROTO_INIT, &hu->flags); + err = hci_uart_register_dev(hu); if (err) { return err; } set_bit(HCI_UART_PROTO_READY, &hu->flags); + clear_bit(HCI_UART_PROTO_INIT, &hu->flags); + return 0; } diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index fb4a2d0d8cc8..c6f13d7c22a2 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -90,6 +90,7 @@ struct hci_uart { #define HCI_UART_REGISTERED 1 #define HCI_UART_PROTO_READY 2 #define HCI_UART_NO_SUSPEND_NOTIFIER 3 +#define HCI_UART_PROTO_INIT 4 /* TX states */ #define HCI_UART_SENDING 1 diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index 8378c3319cd5..b4bb4f8b8f44 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -1201,11 +1201,16 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, struct mhi_ring_element *mhi_tre; struct mhi_buf_info *buf_info; int eot, eob, chain, bei; - int ret; + int ret = 0; /* Protect accesses for reading and incrementing WP */ write_lock_bh(&mhi_chan->lock); + if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) { + ret = -ENODEV; + goto out; + } + buf_ring = &mhi_chan->buf_ring; tre_ring = &mhi_chan->tre_ring; @@ -1223,10 +1228,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, if (!info->pre_mapped) { ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); - if (ret) { - write_unlock_bh(&mhi_chan->lock); - return ret; - } + if (ret) + goto out; } eob = !!(flags & MHI_EOB); @@ -1243,9 +1246,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, mhi_add_ring_element(mhi_cntrl, tre_ring); mhi_add_ring_element(mhi_cntrl, buf_ring); +out: write_unlock_bh(&mhi_chan->lock); - return 0; + return ret; } int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index e01754af576b..4bb4bcd45b0a 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -824,8 +824,9 @@ static void mhi_pci_recovery_work(struct work_struct *work) err_unprepare: mhi_unprepare_after_power_down(mhi_cntrl); err_try_reset: - if (pci_reset_function(pdev)) - dev_err(&pdev->dev, "Recovery failed\n"); + err = pci_try_reset_function(pdev); + if (err) + dev_err(&pdev->dev, "Recovery failed: %d\n", err); } static void health_check(struct timer_list *t) diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c index a0e9e80d92ee..d6a4b1671d5b 100644 --- 
a/drivers/char/ipmi/ipmb_dev_int.c +++ b/drivers/char/ipmi/ipmb_dev_int.c @@ -321,6 +321,9 @@ static int ipmb_probe(struct i2c_client *client) ipmb_dev->miscdev.name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s%d", "ipmb-", client->adapter->nr); + if (!ipmb_dev->miscdev.name) + return -ENOMEM; + ipmb_dev->miscdev.fops = &ipmb_fops; ipmb_dev->miscdev.parent = &client->dev; ret = misc_register(&ipmb_dev->miscdev); diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c index cd266021d010..1a5644051d31 100644 --- a/drivers/char/tpm/eventlog/acpi.c +++ b/drivers/char/tpm/eventlog/acpi.c @@ -14,6 +14,7 @@ * Access to the event log extended by the TCG BIOS of PC platform */ +#include #include #include #include @@ -62,6 +63,11 @@ static bool tpm_is_tpm2_log(void *bios_event_log, u64 len) return n == 0; } +static void tpm_bios_log_free(void *data) +{ + kvfree(data); +} + /* read binary bios log */ int tpm_read_log_acpi(struct tpm_chip *chip) { @@ -135,7 +141,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip) } /* malloc EventLog space */ - log->bios_event_log = kmalloc(len, GFP_KERNEL); + log->bios_event_log = kvmalloc(len, GFP_KERNEL); if (!log->bios_event_log) return -ENOMEM; @@ -161,10 +167,16 @@ int tpm_read_log_acpi(struct tpm_chip *chip) goto err; } + ret = devm_add_action(&chip->dev, tpm_bios_log_free, log->bios_event_log); + if (ret) { + log->bios_event_log = NULL; + goto err; + } + return format; err: - kfree(log->bios_event_log); + tpm_bios_log_free(log->bios_event_log); log->bios_event_log = NULL; return ret; } diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c index e6cb9d525e30..4e9d7c2bf32e 100644 --- a/drivers/char/tpm/eventlog/efi.c +++ b/drivers/char/tpm/eventlog/efi.c @@ -6,6 +6,7 @@ * Thiebaud Weksteen */ +#include #include #include @@ -55,7 +56,7 @@ int tpm_read_log_efi(struct tpm_chip *chip) } /* malloc EventLog space */ - log->bios_event_log = kmemdup(log_tbl->log, log_size, GFP_KERNEL); + log->bios_event_log = devm_kmemdup(&chip->dev, log_tbl->log, log_size, GFP_KERNEL); if (!log->bios_event_log) { ret = -ENOMEM; goto out; @@ -76,7 +77,7 @@ int tpm_read_log_efi(struct tpm_chip *chip) MEMREMAP_WB); if (!final_tbl) { pr_err("Could not map UEFI TPM final log\n"); - kfree(log->bios_event_log); + devm_kfree(&chip->dev, log->bios_event_log); ret = -ENOMEM; goto out; } @@ -91,11 +92,11 @@ int tpm_read_log_efi(struct tpm_chip *chip) * Allocate memory for the 'combined log' where we will append the * 'final events log' to. 
*/ - tmp = krealloc(log->bios_event_log, - log_size + final_events_log_size, - GFP_KERNEL); + tmp = devm_krealloc(&chip->dev, log->bios_event_log, + log_size + final_events_log_size, + GFP_KERNEL); if (!tmp) { - kfree(log->bios_event_log); + devm_kfree(&chip->dev, log->bios_event_log); ret = -ENOMEM; goto out; } diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c index a9ce66d09a75..741ab2204b11 100644 --- a/drivers/char/tpm/eventlog/of.c +++ b/drivers/char/tpm/eventlog/of.c @@ -10,6 +10,7 @@ * Read the event log created by the firmware on PPC64 */ +#include #include #include #include @@ -65,7 +66,7 @@ int tpm_read_log_of(struct tpm_chip *chip) return -EIO; } - log->bios_event_log = kmemdup(__va(base), size, GFP_KERNEL); + log->bios_event_log = devm_kmemdup(&chip->dev, __va(base), size, GFP_KERNEL); if (!log->bios_event_log) return -ENOMEM; diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index c0759d49fd14..916ee815b140 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -267,7 +267,6 @@ static void tpm_dev_release(struct device *dev) idr_remove(&dev_nums_idr, chip->dev_num); mutex_unlock(&idr_lock); - kfree(chip->log.bios_event_log); kfree(chip->work_space.context_buf); kfree(chip->work_space.session_buf); kfree(chip->allocated_banks); diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 5889d9edaf94..caf9ba2dc0b1 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -104,11 +104,10 @@ again: return 0; /* process status changes without irq support */ do { + usleep_range(priv->timeout_min, priv->timeout_max); status = chip->ops->status(chip); if ((status & mask) == mask) return 0; - usleep_range(priv->timeout_min, - priv->timeout_max); } while (time_before(jiffies, stop)); return -ETIME; } @@ -433,7 +432,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, &priv->int_queue, false) < 0) { - rc = -ETIME; + if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags)) + rc = -EAGAIN; + else + rc = -ETIME; goto out_err; } status = tpm_tis_status(chip); @@ -450,7 +452,10 @@ static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) if (wait_for_tpm_stat(chip, TPM_STS_VALID, chip->timeout_c, &priv->int_queue, false) < 0) { - rc = -ETIME; + if (test_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags)) + rc = -EAGAIN; + else + rc = -ETIME; goto out_err; } status = tpm_tis_status(chip); @@ -505,9 +510,11 @@ static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) if (rc >= 0) /* Data transfer done successfully */ break; - else if (rc != -EIO) + else if (rc != -EAGAIN && rc != -EIO) /* Data transfer failed, not recoverable */ return rc; + + usleep_range(priv->timeout_min, priv->timeout_max); } rc = tpm_tis_verify_crc(priv, len, buf); @@ -1044,6 +1051,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq, priv->timeout_max = TIS_TIMEOUT_MAX_ATML; } + if (priv->manufacturer_id == TPM_VID_IFX) + set_bit(TPM_TIS_STATUS_VALID_RETRY, &priv->flags); + if (is_bsw()) { priv->ilb_base_addr = ioremap(INTEL_LEGACY_BLK_BASE_ADDR, ILB_REMAP_SIZE); diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index 610bfadb6acf..5f29eebef52b 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -53,7 +53,7 @@ enum tis_int_flags { enum tis_defaults { TIS_MEM_LEN = 0x5000, 
TIS_SHORT_TIMEOUT = 750, /* ms */ - TIS_LONG_TIMEOUT = 2000, /* 2 sec */ + TIS_LONG_TIMEOUT = 4000, /* 4 secs */ TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */ TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */ }; @@ -88,6 +88,7 @@ enum tpm_tis_flags { TPM_TIS_INVALID_STATUS = 1, TPM_TIS_DEFAULT_CANCELLATION = 2, TPM_TIS_IRQ_TESTED = 3, + TPM_TIS_STATUS_VALID_RETRY = 4, }; struct tpm_tis_data { diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 899036ce3802..5c7885307a25 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1615,8 +1615,8 @@ static void handle_control_message(struct virtio_device *vdev, break; case VIRTIO_CONSOLE_RESIZE: { struct { - __u16 rows; - __u16 cols; + __virtio16 rows; + __virtio16 cols; } size; if (!is_console_port(port)) @@ -1624,7 +1624,8 @@ static void handle_control_message(struct virtio_device *vdev, memcpy(&size, buf->buf + buf->offset + sizeof(*cpkt), sizeof(size)); - set_console_size(port, size.rows, size.cols); + set_console_size(port, virtio16_to_cpu(vdev, size.rows), + virtio16_to_cpu(vdev, size.cols)); port->cons.hvc->irq_requested = 1; resize_console(port); diff --git a/drivers/char/xillybus/xillybus_class.c b/drivers/char/xillybus/xillybus_class.c index 0f238648dcfe..e9a288e61c15 100644 --- a/drivers/char/xillybus/xillybus_class.c +++ b/drivers/char/xillybus/xillybus_class.c @@ -227,14 +227,15 @@ int xillybus_find_inode(struct inode *inode, break; } - mutex_unlock(&unit_mutex); - - if (!unit) + if (!unit) { + mutex_unlock(&unit_mutex); return -ENODEV; + } *private_data = unit->private_data; *index = minor - unit->lowest_minor; + mutex_unlock(&unit_mutex); return 0; } EXPORT_SYMBOL(xillybus_find_inode); diff --git a/drivers/char/xillybus/xillyusb.c b/drivers/char/xillybus/xillyusb.c index 3a2a0fb3d928..45771b1a3716 100644 --- a/drivers/char/xillybus/xillyusb.c +++ b/drivers/char/xillybus/xillyusb.c @@ -185,6 +185,14 @@ struct xillyusb_dev { struct mutex process_in_mutex; /* synchronize wakeup_all() */ }; +/* + * kref_mutex is used in xillyusb_open() to prevent the xillyusb_dev + * struct from being freed during the gap between being found by + * xillybus_find_inode() and having its reference count incremented. 
+ */ + +static DEFINE_MUTEX(kref_mutex); + /* FPGA to host opcodes */ enum { OPCODE_DATA = 0, @@ -1234,9 +1242,16 @@ static int xillyusb_open(struct inode *inode, struct file *filp) int rc; int index; + mutex_lock(&kref_mutex); + rc = xillybus_find_inode(inode, (void **)&xdev, &index); - if (rc) + if (rc) { + mutex_unlock(&kref_mutex); return rc; + } + + kref_get(&xdev->kref); + mutex_unlock(&kref_mutex); chan = &xdev->channels[index]; filp->private_data = chan; @@ -1272,8 +1287,6 @@ static int xillyusb_open(struct inode *inode, struct file *filp) ((filp->f_mode & FMODE_WRITE) && chan->open_for_write)) goto unmutex_fail; - kref_get(&xdev->kref); - if (filp->f_mode & FMODE_READ) chan->open_for_read = 1; @@ -1410,6 +1423,7 @@ unopen: return rc; unmutex_fail: + kref_put(&xdev->kref, cleanup_dev); mutex_unlock(&chan->lock); return rc; } @@ -2244,7 +2258,9 @@ static void xillyusb_disconnect(struct usb_interface *interface) xdev->dev = NULL; + mutex_lock(&kref_mutex); kref_put(&xdev->kref, cleanup_dev); + mutex_unlock(&kref_mutex); } static struct usb_driver xillyusb_driver = { diff --git a/drivers/clk/analogbits/wrpll-cln28hpc.c b/drivers/clk/analogbits/wrpll-cln28hpc.c index 09ca82356399..d8ae39295996 100644 --- a/drivers/clk/analogbits/wrpll-cln28hpc.c +++ b/drivers/clk/analogbits/wrpll-cln28hpc.c @@ -291,7 +291,7 @@ int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate, vco = vco_pre * f; } - delta = abs(target_rate - vco); + delta = abs(target_vco_rate - vco); if (delta < best_delta) { best_delta = delta; best_r = r; diff --git a/drivers/clk/clk-apple-nco.c b/drivers/clk/clk-apple-nco.c index 39472a51530a..457a48d48941 100644 --- a/drivers/clk/clk-apple-nco.c +++ b/drivers/clk/clk-apple-nco.c @@ -297,6 +297,9 @@ static int applnco_probe(struct platform_device *pdev) memset(&init, 0, sizeof(init)); init.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s-%d", np->name, i); + if (!init.name) + return -ENOMEM; + init.ops = &applnco_ops; init.parent_data = &pdata; init.num_parents = 1; diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c index ac6ff736ac8f..bb5cd9d38993 100644 --- a/drivers/clk/clk-axi-clkgen.c +++ b/drivers/clk/clk-axi-clkgen.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -510,6 +511,7 @@ static int axi_clkgen_probe(struct platform_device *pdev) struct clk_init_data init; const char *parent_names[2]; const char *clk_name; + struct clk *axi_clk; unsigned int i; int ret; @@ -526,8 +528,24 @@ static int axi_clkgen_probe(struct platform_device *pdev) return PTR_ERR(axi_clkgen->base); init.num_parents = of_clk_get_parent_count(pdev->dev.of_node); - if (init.num_parents < 1 || init.num_parents > 2) - return -EINVAL; + + axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk"); + if (!IS_ERR(axi_clk)) { + if (init.num_parents < 2 || init.num_parents > 3) + return -EINVAL; + + init.num_parents -= 1; + } else { + /* + * Legacy... So that old DTs which do not have clock-names still + * work. In this case we don't explicitly enable the AXI bus + * clock. 
+ */ + if (PTR_ERR(axi_clk) != -ENOENT) + return PTR_ERR(axi_clk); + if (init.num_parents < 1 || init.num_parents > 2) + return -EINVAL; + } for (i = 0; i < init.num_parents; i++) { parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 7ffe17b1510c..c7c63653e83c 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -5128,6 +5128,10 @@ of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec) if (!clkspec) return ERR_PTR(-EINVAL); + /* Check if node in clkspec is in disabled/fail state */ + if (!of_device_is_available(clkspec->np)) + return ERR_PTR(-ENOENT); + mutex_lock(&of_clk_mutex); list_for_each_entry(provider, &of_clk_providers, link) { if (provider->node == clkspec->np) { diff --git a/drivers/clk/imx/clk-fracn-gppll.c b/drivers/clk/imx/clk-fracn-gppll.c index 421a78e295ee..5f2296bb67a6 100644 --- a/drivers/clk/imx/clk-fracn-gppll.c +++ b/drivers/clk/imx/clk-fracn-gppll.c @@ -250,9 +250,11 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate, pll_div = FIELD_PREP(PLL_RDIV_MASK, rate->rdiv) | rate->odiv | FIELD_PREP(PLL_MFI_MASK, rate->mfi); writel_relaxed(pll_div, pll->base + PLL_DIV); + readl(pll->base + PLL_DIV); if (pll->flags & CLK_FRACN_GPPLL_FRACN) { writel_relaxed(rate->mfd, pll->base + PLL_DENOMINATOR); writel_relaxed(FIELD_PREP(PLL_MFN_MASK, rate->mfn), pll->base + PLL_NUMERATOR); + readl(pll->base + PLL_NUMERATOR); } /* Wait for 5us according to fracn mode pll doc */ @@ -261,6 +263,7 @@ static int clk_fracn_gppll_set_rate(struct clk_hw *hw, unsigned long drate, /* Enable Powerup */ tmp |= POWERUP_MASK; writel_relaxed(tmp, pll->base + PLL_CTRL); + readl(pll->base + PLL_CTRL); /* Wait Lock */ ret = clk_fracn_gppll_wait_lock(pll); @@ -298,14 +301,15 @@ static int clk_fracn_gppll_prepare(struct clk_hw *hw) val |= POWERUP_MASK; writel_relaxed(val, pll->base + PLL_CTRL); - - val |= CLKMUX_EN; - writel_relaxed(val, pll->base + PLL_CTRL); + readl(pll->base + PLL_CTRL); ret = clk_fracn_gppll_wait_lock(pll); if (ret) return ret; + val |= CLKMUX_EN; + writel_relaxed(val, pll->base + PLL_CTRL); + val &= ~CLKMUX_BYPASS; writel_relaxed(val, pll->base + PLL_CTRL); diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index 2de49bbc40f3..a74b73cd243e 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -398,17 +399,158 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r static const char * const imx8mp_clkout_sels[] = {"audio_pll1_out", "audio_pll2_out", "video_pll1_out", "dummy", "dummy", "gpu_pll_out", "vpu_pll_out", - "arm_pll_out", "sys_pll1", "sys_pll2", "sys_pll3", - "dummy", "dummy", "osc_24m", "dummy", "osc_32k"}; + "arm_pll_out", "sys_pll1_out", "sys_pll2_out", + "sys_pll3_out", "dummy", "dummy", "osc_24m", + "dummy", "osc_32k"}; static struct clk_hw **hws; static struct clk_hw_onecell_data *clk_hw_data; +struct imx8mp_clock_constraints { + unsigned int clkid; + u32 maxrate; +}; + +/* + * Below tables are taken from IMX8MPCEC Rev. 2.1, 07/2023 + * Table 13. Maximum frequency of modules. + * Probable typos fixed are marked with a comment. 
+ */ +static const struct imx8mp_clock_constraints imx8mp_clock_common_constraints[] = { + { IMX8MP_CLK_A53_DIV, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_ENET_AXI, 266666667 }, /* Datasheet claims 266MHz */ + { IMX8MP_CLK_NAND_USDHC_BUS, 266666667 }, /* Datasheet claims 266MHz */ + { IMX8MP_CLK_MEDIA_APB, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_HDMI_APB, 133333333 }, /* Datasheet claims 133MHz */ + { IMX8MP_CLK_ML_AXI, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_AHB, 133333333 }, + { IMX8MP_CLK_IPG_ROOT, 66666667 }, + { IMX8MP_CLK_AUDIO_AHB, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_DISP2_PIX, 170 * HZ_PER_MHZ }, + { IMX8MP_CLK_DRAM_ALT, 666666667 }, + { IMX8MP_CLK_DRAM_APB, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_CAN1, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_CAN2, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_PCIE_AUX, 10 * HZ_PER_MHZ }, + { IMX8MP_CLK_I2C5, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_I2C6, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_SAI1, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_SAI2, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_SAI3, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_SAI5, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_SAI6, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_ENET_QOS, 125 * HZ_PER_MHZ }, + { IMX8MP_CLK_ENET_QOS_TIMER, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_ENET_REF, 125 * HZ_PER_MHZ }, + { IMX8MP_CLK_ENET_TIMER, 125 * HZ_PER_MHZ }, + { IMX8MP_CLK_ENET_PHY_REF, 125 * HZ_PER_MHZ }, + { IMX8MP_CLK_NAND, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_QSPI, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_USDHC1, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_USDHC2, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_I2C1, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_I2C2, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_I2C3, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_I2C4, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_UART1, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_UART2, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_UART3, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_UART4, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_ECSPI1, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_ECSPI2, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_PWM1, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_PWM2, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_PWM3, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_PWM4, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_GPT1, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPT2, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPT3, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPT4, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPT5, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPT6, 100 * HZ_PER_MHZ }, + { IMX8MP_CLK_WDOG, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_IPP_DO_CLKO1, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_IPP_DO_CLKO2, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_HDMI_REF_266M, 266 * HZ_PER_MHZ }, + { IMX8MP_CLK_USDHC3, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_MIPI_PHY1_REF, 300 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_DISP1_PIX, 250 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_CAM2_PIX, 277 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_LDB, 595 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_ECSPI3, 80 * HZ_PER_MHZ }, + { IMX8MP_CLK_PDM, 200 * HZ_PER_MHZ }, + { IMX8MP_CLK_SAI7, 66666667 }, /* Datasheet claims 66MHz */ + { IMX8MP_CLK_MAIN_AXI, 400 * HZ_PER_MHZ }, + { /* Sentinel */ } +}; + +static const struct imx8mp_clock_constraints imx8mp_clock_nominal_constraints[] = { + { IMX8MP_CLK_M7_CORE, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_ML_CORE, 800 * HZ_PER_MHZ }, + { 
IMX8MP_CLK_GPU3D_CORE, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU3D_SHADER_CORE, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU2D_CORE, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_AUDIO_AXI_SRC, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_HSIO_AXI, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_ISP, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_BUS, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_AXI, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_HDMI_AXI, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU_AXI, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU_AHB, 300 * HZ_PER_MHZ }, + { IMX8MP_CLK_NOC, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_NOC_IO, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_ML_AHB, 300 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_G1, 600 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_G2, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_CAM1_PIX, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_VC8000E, 400 * HZ_PER_MHZ }, /* Datasheet claims 500MHz */ + { IMX8MP_CLK_DRAM_CORE, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_GIC, 400 * HZ_PER_MHZ }, + { /* Sentinel */ } +}; + +static const struct imx8mp_clock_constraints imx8mp_clock_overdrive_constraints[] = { + { IMX8MP_CLK_M7_CORE, 800 * HZ_PER_MHZ}, + { IMX8MP_CLK_ML_CORE, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU3D_CORE, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU3D_SHADER_CORE, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU2D_CORE, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_AUDIO_AXI_SRC, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_HSIO_AXI, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_ISP, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_BUS, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_AXI, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_HDMI_AXI, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU_AXI, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_GPU_AHB, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_NOC, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_NOC_IO, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_ML_AHB, 400 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_G1, 800 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_G2, 700 * HZ_PER_MHZ }, + { IMX8MP_CLK_MEDIA_CAM1_PIX, 500 * HZ_PER_MHZ }, + { IMX8MP_CLK_VPU_VC8000E, 500 * HZ_PER_MHZ }, /* Datasheet claims 400MHz */ + { IMX8MP_CLK_DRAM_CORE, 1000 * HZ_PER_MHZ }, + { IMX8MP_CLK_GIC, 500 * HZ_PER_MHZ }, + { /* Sentinel */ } +}; + +static void imx8mp_clocks_apply_constraints(const struct imx8mp_clock_constraints constraints[]) +{ + const struct imx8mp_clock_constraints *constr; + + for (constr = constraints; constr->clkid; constr++) + clk_hw_set_rate_range(hws[constr->clkid], 0, constr->maxrate); +} + static int imx8mp_clocks_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np; void __iomem *anatop_base, *ccm_base; + const char *opmode; int err; np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop"); @@ -703,6 +845,16 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) imx_check_clk_hws(hws, IMX8MP_CLK_END); + imx8mp_clocks_apply_constraints(imx8mp_clock_common_constraints); + + err = of_property_read_string(np, "fsl,operating-mode", &opmode); + if (!err) { + if (!strcmp(opmode, "nominal")) + imx8mp_clocks_apply_constraints(imx8mp_clock_nominal_constraints); + else if (!strcmp(opmode, "overdrive")) + imx8mp_clocks_apply_constraints(imx8mp_clock_overdrive_constraints); + } + err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data); if (err < 0) { dev_err(dev, "failed to register hws for i.MX8MP\n"); diff --git a/drivers/clk/imx/clk-lpcg-scu.c b/drivers/clk/imx/clk-lpcg-scu.c index dd5abd09f3e2..620afdf8dc03 100644 --- a/drivers/clk/imx/clk-lpcg-scu.c +++ b/drivers/clk/imx/clk-lpcg-scu.c @@ -6,10 +6,12 @@ #include #include +#include #include #include #include #include +#include 
#include "clk-scu.h" @@ -41,6 +43,29 @@ struct clk_lpcg_scu { #define to_clk_lpcg_scu(_hw) container_of(_hw, struct clk_lpcg_scu, hw) +/* e10858 -LPCG clock gating register synchronization errata */ +static void lpcg_e10858_writel(unsigned long rate, void __iomem *reg, u32 val) +{ + writel(val, reg); + + if (rate >= 24 * HZ_PER_MHZ || rate == 0) { + /* + * The time taken to access the LPCG registers from the AP core + * through the interconnect is longer than the minimum delay + * of 4 clock cycles required by the errata. + * Adding a readl will provide sufficient delay to prevent + * back-to-back writes. + */ + readl(reg); + } else { + /* + * For clocks running below 24MHz, wait a minimum of + * 4 clock cycles. + */ + ndelay(4 * (DIV_ROUND_UP(1000 * HZ_PER_MHZ, rate))); + } +} + static int clk_lpcg_scu_enable(struct clk_hw *hw) { struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw); @@ -57,7 +82,8 @@ static int clk_lpcg_scu_enable(struct clk_hw *hw) val |= CLK_GATE_SCU_LPCG_HW_SEL; reg |= val << clk->bit_idx; - writel(reg, clk->reg); + + lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg); spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags); @@ -74,7 +100,7 @@ static void clk_lpcg_scu_disable(struct clk_hw *hw) reg = readl_relaxed(clk->reg); reg &= ~(CLK_GATE_SCU_LPCG_MASK << clk->bit_idx); - writel(reg, clk->reg); + lpcg_e10858_writel(clk_hw_get_rate(hw), clk->reg, reg); spin_unlock_irqrestore(&imx_lpcg_scu_lock, flags); } @@ -145,13 +171,8 @@ static int __maybe_unused imx_clk_lpcg_scu_resume(struct device *dev) { struct clk_lpcg_scu *clk = dev_get_drvdata(dev); - /* - * FIXME: Sometimes writes don't work unless the CPU issues - * them twice - */ - - writel(clk->state, clk->reg); writel(clk->state, clk->reg); + lpcg_e10858_writel(0, clk->reg, clk->state); dev_dbg(dev, "restore lpcg state 0x%x\n", clk->state); return 0; diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c index db307890e4c1..ec5afc7f2690 100644 --- a/drivers/clk/imx/clk-scu.c +++ b/drivers/clk/imx/clk-scu.c @@ -577,7 +577,7 @@ static int __maybe_unused imx_clk_scu_suspend(struct device *dev) clk->rate = clk_scu_recalc_rate(&clk->hw, 0); else clk->rate = clk_hw_get_rate(&clk->hw); - clk->is_enabled = clk_hw_is_enabled(&clk->hw); + clk->is_enabled = clk_hw_is_prepared(&clk->hw); if (clk->parent) dev_dbg(dev, "save parent %s idx %u\n", clk_hw_get_name(clk->parent), diff --git a/drivers/clk/mediatek/clk-mt2701-bdp.c b/drivers/clk/mediatek/clk-mt2701-bdp.c index b0f057207945..d2647ea58ae2 100644 --- a/drivers/clk/mediatek/clk-mt2701-bdp.c +++ b/drivers/clk/mediatek/clk-mt2701-bdp.c @@ -31,6 +31,7 @@ static const struct mtk_gate_regs bdp1_cg_regs = { GATE_MTK(_id, _name, _parent, &bdp1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv) static const struct mtk_gate bdp_clks[] = { + GATE_DUMMY(CLK_DUMMY, "bdp_dummy"), GATE_BDP0(CLK_BDP_BRG_BA, "brg_baclk", "mm_sel", 0), GATE_BDP0(CLK_BDP_BRG_DRAM, "brg_dram", "mm_sel", 1), GATE_BDP0(CLK_BDP_LARB_DRAM, "larb_dram", "mm_sel", 2), diff --git a/drivers/clk/mediatek/clk-mt2701-img.c b/drivers/clk/mediatek/clk-mt2701-img.c index eb172473f075..569b6d3607dd 100644 --- a/drivers/clk/mediatek/clk-mt2701-img.c +++ b/drivers/clk/mediatek/clk-mt2701-img.c @@ -22,6 +22,7 @@ static const struct mtk_gate_regs img_cg_regs = { GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr) static const struct mtk_gate img_clks[] = { + GATE_DUMMY(CLK_DUMMY, "img_dummy"), GATE_IMG(CLK_IMG_SMI_COMM, "img_smi_comm", "mm_sel", 0), GATE_IMG(CLK_IMG_RESZ, "img_resz", "mm_sel", 1), 
GATE_IMG(CLK_IMG_JPGDEC_SMI, "img_jpgdec_smi", "mm_sel", 5), diff --git a/drivers/clk/mediatek/clk-mt2701-vdec.c b/drivers/clk/mediatek/clk-mt2701-vdec.c index 0f07c5d731df..fdd2645c167f 100644 --- a/drivers/clk/mediatek/clk-mt2701-vdec.c +++ b/drivers/clk/mediatek/clk-mt2701-vdec.c @@ -31,6 +31,7 @@ static const struct mtk_gate_regs vdec1_cg_regs = { GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv) static const struct mtk_gate vdec_clks[] = { + GATE_DUMMY(CLK_DUMMY, "vdec_dummy"), GATE_VDEC0(CLK_VDEC_CKGEN, "vdec_cken", "vdec_sel", 0), GATE_VDEC1(CLK_VDEC_LARB, "vdec_larb_cken", "mm_sel", 0), }; diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c index 9dbfc11d5c59..7b1ad73309b1 100644 --- a/drivers/clk/mediatek/clk-mtk.c +++ b/drivers/clk/mediatek/clk-mtk.c @@ -21,6 +21,22 @@ #include "clk-gate.h" #include "clk-mux.h" +const struct mtk_gate_regs cg_regs_dummy = { 0, 0, 0 }; +EXPORT_SYMBOL_GPL(cg_regs_dummy); + +static int mtk_clk_dummy_enable(struct clk_hw *hw) +{ + return 0; +} + +static void mtk_clk_dummy_disable(struct clk_hw *hw) { } + +const struct clk_ops mtk_clk_dummy_ops = { + .enable = mtk_clk_dummy_enable, + .disable = mtk_clk_dummy_disable, +}; +EXPORT_SYMBOL_GPL(mtk_clk_dummy_ops); + static void mtk_init_clk_data(struct clk_hw_onecell_data *clk_data, unsigned int clk_num) { diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h index 65c24ab6c947..04f371730ee3 100644 --- a/drivers/clk/mediatek/clk-mtk.h +++ b/drivers/clk/mediatek/clk-mtk.h @@ -22,6 +22,25 @@ struct platform_device; +/* + * We need the clock IDs to start from zero but to maintain devicetree + * backwards compatibility we can't change bindings to start from zero. + * Only a few platforms are affected, so we solve issues given by the + * commonized MTK clocks probe function(s) by adding a dummy clock at + * the beginning where needed. + */ +#define CLK_DUMMY 0 + +extern const struct clk_ops mtk_clk_dummy_ops; +extern const struct mtk_gate_regs cg_regs_dummy; + +#define GATE_DUMMY(_id, _name) { \ + .id = _id, \ + .name = _name, \ + .regs = &cg_regs_dummy, \ + .ops = &mtk_clk_dummy_ops, \ + } + struct mtk_fixed_clk { int id; const char *name; diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 310accf94830..d13a60fefc1b 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -1136,8 +1136,18 @@ static struct clk_regmap g12a_cpu_clk_div16_en = { .hw.init = &(struct clk_init_data) { .name = "cpu_clk_div16_en", .ops = &clk_regmap_gate_ro_ops, - .parent_hws = (const struct clk_hw *[]) { - &g12a_cpu_clk.hw + .parent_data = &(const struct clk_parent_data) { + /* + * Note: + * G12A and G12B have different cpu clocks (with + * different struct clk_hw). We fallback to the global + * naming string mechanism so this clock picks + * up the appropriate one. Same goes for the other + * clock using cpu cluster A clock output and present + * on both G12 variant. 
+ */ + .name = "cpu_clk", + .index = -1, }, .num_parents = 1, /* @@ -1202,7 +1212,10 @@ static struct clk_regmap g12a_cpu_clk_apb_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_apb_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1236,7 +1249,10 @@ static struct clk_regmap g12a_cpu_clk_atb_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_atb_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1270,7 +1286,10 @@ static struct clk_regmap g12a_cpu_clk_axi_div = { .hw.init = &(struct clk_init_data){ .name = "cpu_clk_axi_div", .ops = &clk_regmap_divider_ro_ops, - .parent_hws = (const struct clk_hw *[]) { &g12a_cpu_clk.hw }, + .parent_data = &(const struct clk_parent_data) { + .name = "cpu_clk", + .index = -1, + }, .num_parents = 1, }, }; @@ -1305,13 +1324,6 @@ static struct clk_regmap g12a_cpu_clk_trace_div = { .name = "cpu_clk_trace_div", .ops = &clk_regmap_divider_ro_ops, .parent_data = &(const struct clk_parent_data) { - /* - * Note: - * G12A and G12B have different cpu_clks (with - * different struct clk_hw). We fallback to the global - * naming string mechanism so cpu_clk_trace_div picks - * up the appropriate one. - */ .name = "cpu_clk", .index = -1, }, @@ -4187,7 +4199,7 @@ static MESON_GATE(g12a_spicc_1, HHI_GCLK_MPEG0, 14); static MESON_GATE(g12a_hiu_reg, HHI_GCLK_MPEG0, 19); static MESON_GATE(g12a_mipi_dsi_phy, HHI_GCLK_MPEG0, 20); static MESON_GATE(g12a_assist_misc, HHI_GCLK_MPEG0, 23); -static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 4); +static MESON_GATE(g12a_emmc_a, HHI_GCLK_MPEG0, 24); static MESON_GATE(g12a_emmc_b, HHI_GCLK_MPEG0, 25); static MESON_GATE(g12a_emmc_c, HHI_GCLK_MPEG0, 26); static MESON_GATE(g12a_audio_codec, HHI_GCLK_MPEG0, 28); diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 608e0e8ca49a..35bc13e73c0d 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -1270,14 +1270,13 @@ static struct clk_regmap gxbb_cts_i958 = { }, }; +/* + * This table skips a clock named 'cts_slow_oscin' in the documentation + * This clock does not exist yet in this controller or the AO one + */ +static u32 gxbb_32k_clk_parents_val_table[] = { 0, 2, 3 }; static const struct clk_parent_data gxbb_32k_clk_parent_data[] = { { .fw_name = "xtal", }, - /* - * FIXME: This clock is provided by the ao clock controller but the - * clock is not yet part of the binding of this controller, so string - * name must be use to set this parent. 
- */ - { .name = "cts_slow_oscin", .index = -1 }, { .hw = &gxbb_fclk_div3.hw }, { .hw = &gxbb_fclk_div5.hw }, }; @@ -1287,6 +1286,7 @@ static struct clk_regmap gxbb_32k_clk_sel = { .offset = HHI_32K_CLK_CNTL, .mask = 0x3, .shift = 16, + .table = gxbb_32k_clk_parents_val_table, }, .hw.init = &(struct clk_init_data){ .name = "32k_clk_sel", @@ -1310,7 +1310,7 @@ static struct clk_regmap gxbb_32k_clk_div = { &gxbb_32k_clk_sel.hw }, .num_parents = 1, - .flags = CLK_SET_RATE_PARENT | CLK_DIVIDER_ROUND_CLOSEST, + .flags = CLK_SET_RATE_PARENT, }, }; diff --git a/drivers/clk/qcom/camcc-sm8250.c b/drivers/clk/qcom/camcc-sm8250.c index 9b32c56a5bc5..e29706d78287 100644 --- a/drivers/clk/qcom/camcc-sm8250.c +++ b/drivers/clk/qcom/camcc-sm8250.c @@ -411,7 +411,7 @@ static struct clk_rcg2 cam_cc_bps_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -433,7 +433,7 @@ static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -454,7 +454,7 @@ static struct clk_rcg2 cam_cc_cci_0_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -469,7 +469,7 @@ static struct clk_rcg2 cam_cc_cci_1_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -490,7 +490,7 @@ static struct clk_rcg2 cam_cc_cphy_rx_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -511,7 +511,7 @@ static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -526,7 +526,7 @@ static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -556,7 +556,7 @@ static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -571,7 +571,7 @@ static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -586,7 +586,7 @@ static struct clk_rcg2 cam_cc_csi5phytimer_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -611,7 +611,7 @@ static struct clk_rcg2 cam_cc_fast_ahb_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -634,7 +634,7 @@ static struct clk_rcg2 cam_cc_fd_core_clk_src = { 
.parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -649,7 +649,7 @@ static struct clk_rcg2 cam_cc_icp_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -673,7 +673,7 @@ static struct clk_rcg2 cam_cc_ife_0_clk_src = { .parent_data = cam_cc_parent_data_2, .num_parents = ARRAY_SIZE(cam_cc_parent_data_2), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -710,7 +710,7 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -734,7 +734,7 @@ static struct clk_rcg2 cam_cc_ife_1_clk_src = { .parent_data = cam_cc_parent_data_3, .num_parents = ARRAY_SIZE(cam_cc_parent_data_3), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -749,7 +749,7 @@ static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -771,7 +771,7 @@ static struct clk_rcg2 cam_cc_ife_lite_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -786,7 +786,7 @@ static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -810,7 +810,7 @@ static struct clk_rcg2 cam_cc_ipe_0_clk_src = { .parent_data = cam_cc_parent_data_4, .num_parents = ARRAY_SIZE(cam_cc_parent_data_4), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -825,7 +825,7 @@ static struct clk_rcg2 cam_cc_jpeg_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -847,7 +847,7 @@ static struct clk_rcg2 cam_cc_mclk0_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -862,7 +862,7 @@ static struct clk_rcg2 cam_cc_mclk1_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -877,7 +877,7 @@ static struct clk_rcg2 cam_cc_mclk2_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -892,7 +892,7 @@ static struct clk_rcg2 cam_cc_mclk3_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -907,7 +907,7 @@ static struct clk_rcg2 cam_cc_mclk4_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = 
CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -922,7 +922,7 @@ static struct clk_rcg2 cam_cc_mclk5_clk_src = { .parent_data = cam_cc_parent_data_1, .num_parents = ARRAY_SIZE(cam_cc_parent_data_1), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; @@ -993,7 +993,7 @@ static struct clk_rcg2 cam_cc_slow_ahb_clk_src = { .parent_data = cam_cc_parent_data_0, .num_parents = ARRAY_SIZE(cam_cc_parent_data_0), .flags = CLK_SET_RATE_PARENT, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_shared_ops, }, }; diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 8780fd107472..c591fa1ad802 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -339,6 +339,8 @@ void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, mask |= config->pre_div_mask; mask |= config->post_div_mask; mask |= config->vco_mask; + mask |= config->alpha_en_mask; + mask |= config->alpha_mode_mask; regmap_update_bits(regmap, PLL_USER_CTL(pll), mask, val); @@ -559,14 +561,19 @@ clk_alpha_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 alpha_width = pll_alpha_width(pll); - regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); + if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) + return 0; + + if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl)) + return 0; - regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl); if (ctl & PLL_ALPHA_EN) { - regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low); + if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &low)) + return 0; if (alpha_width > 32) { - regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), - &high); + if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL_U(pll), + &high)) + return 0; a = (u64)high << 32 | low; } else { a = low & GENMASK(alpha_width - 1, 0); @@ -758,8 +765,11 @@ alpha_pll_huayra_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 l, alpha = 0, ctl, alpha_m, alpha_n; - regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); - regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl); + if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) + return 0; + + if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl)) + return 0; if (ctl & PLL_ALPHA_EN) { regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &alpha); @@ -953,8 +963,11 @@ clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 l, frac, alpha_width = pll_alpha_width(pll); - regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); - regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac); + if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) + return 0; + + if (regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac)) + return 0; return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width); } @@ -1012,7 +1025,8 @@ clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw); u32 ctl; - regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl); + if (regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &ctl)) + return 0; ctl >>= PLL_POST_DIV_SHIFT; ctl &= PLL_POST_DIV_MASK(pll); @@ -1228,8 +1242,11 @@ static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw, struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 l, frac, alpha_width = pll_alpha_width(pll); - 
regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); - regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac); + if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) + return 0; + + if (regmap_read(pll->clkr.regmap, PLL_FRAC(pll), &frac)) + return 0; return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width); } @@ -1379,7 +1396,8 @@ clk_trion_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) struct regmap *regmap = pll->clkr.regmap; u32 i, div = 1, val; - regmap_read(regmap, PLL_USER_CTL(pll), &val); + if (regmap_read(regmap, PLL_USER_CTL(pll), &val)) + return 0; val >>= pll->post_div_shift; val &= PLL_POST_DIV_MASK(pll); @@ -2252,9 +2270,12 @@ static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw, struct regmap *regmap = pll->clkr.regmap; u32 l, frac; - regmap_read(regmap, PLL_L_VAL(pll), &l); + if (regmap_read(regmap, PLL_L_VAL(pll), &l)) + return 0; l &= LUCID_EVO_PLL_L_VAL_MASK; - regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac); + + if (regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac)) + return 0; return alpha_pll_calc_rate(parent_rate, l, frac, pll_alpha_width(pll)); } @@ -2329,7 +2350,8 @@ static unsigned long clk_rivian_evo_pll_recalc_rate(struct clk_hw *hw, struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 l; - regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); + if (regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l)) + return 0; return parent_rate * l; } diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index 60819a39702d..97bfb3c726f8 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -332,7 +332,7 @@ static unsigned long clk_rpmh_bcm_recalc_rate(struct clk_hw *hw, { struct clk_rpmh *c = to_clk_rpmh(hw); - return c->aggr_state * c->unit; + return (unsigned long)c->aggr_state * c->unit; } static const struct clk_ops clk_rpmh_bcm_ops = { diff --git a/drivers/clk/qcom/dispcc-sm6350.c b/drivers/clk/qcom/dispcc-sm6350.c index 441f042f5ea4..ddacb4f76eca 100644 --- a/drivers/clk/qcom/dispcc-sm6350.c +++ b/drivers/clk/qcom/dispcc-sm6350.c @@ -187,13 +187,12 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = { .cmd_rcgr = 0x1144, .mnd_width = 0, .hid_width = 5, + .parent_map = disp_cc_parent_map_6, .freq_tbl = ftbl_disp_cc_mdss_dp_aux_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "disp_cc_mdss_dp_aux_clk_src", - .parent_data = &(const struct clk_parent_data){ - .fw_name = "bi_tcxo", - }, - .num_parents = 1, + .parent_data = disp_cc_parent_data_6, + .num_parents = ARRAY_SIZE(disp_cc_parent_data_6), .ops = &clk_rcg2_ops, }, }; diff --git a/drivers/clk/qcom/gcc-mdm9607.c b/drivers/clk/qcom/gcc-mdm9607.c index 4c9078e99bb3..19c701b23ac7 100644 --- a/drivers/clk/qcom/gcc-mdm9607.c +++ b/drivers/clk/qcom/gcc-mdm9607.c @@ -536,7 +536,7 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = { }; static struct clk_rcg2 blsp1_uart6_apps_clk_src = { - .cmd_rcgr = 0x6044, + .cmd_rcgr = 0x7044, .mnd_width = 16, .hid_width = 5, .parent_map = gcc_xo_gpll0_map, diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c index 8aafa6591e84..3b32830f7466 100644 --- a/drivers/clk/qcom/gcc-msm8953.c +++ b/drivers/clk/qcom/gcc-msm8953.c @@ -3771,7 +3771,7 @@ static struct clk_branch gcc_venus0_axi_clk = { static struct clk_branch gcc_venus0_core0_vcodec0_clk = { .halt_reg = 0x4c02c, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x4c02c, .enable_mask = BIT(0), diff --git a/drivers/clk/qcom/gcc-qcs404.c b/drivers/clk/qcom/gcc-qcs404.c index 
46d314d69250..14feed0f9806 100644 --- a/drivers/clk/qcom/gcc-qcs404.c +++ b/drivers/clk/qcom/gcc-qcs404.c @@ -353,6 +353,7 @@ static struct clk_alpha_pll gpll1_out_main = { /* 930MHz configuration */ static const struct alpha_pll_config gpll3_config = { .l = 48, + .alpha_hi = 0x70, .alpha = 0x0, .alpha_en_mask = BIT(24), .post_div_mask = 0xf << 8, diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index ef15e8f11402..0ea549d79283 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -455,7 +455,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = { .name = "gcc_qupv3_wrap0_s0_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { @@ -471,7 +471,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = { .name = "gcc_qupv3_wrap0_s1_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { @@ -487,7 +487,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = { .name = "gcc_qupv3_wrap0_s2_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { @@ -503,7 +503,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = { .name = "gcc_qupv3_wrap0_s3_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { @@ -519,7 +519,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = { .name = "gcc_qupv3_wrap0_s4_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { @@ -535,7 +535,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = { .name = "gcc_qupv3_wrap0_s5_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { @@ -551,7 +551,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = { .name = "gcc_qupv3_wrap0_s6_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { @@ -567,7 +567,7 @@ static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = { .name = "gcc_qupv3_wrap0_s7_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { @@ -583,7 +583,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = { .name = "gcc_qupv3_wrap1_s0_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { @@ -599,7 +599,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = { .name = "gcc_qupv3_wrap1_s1_clk_src", .parent_data = gcc_parent_data_0, .num_parents = 
ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { @@ -615,7 +615,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = { .name = "gcc_qupv3_wrap1_s2_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { @@ -631,7 +631,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = { .name = "gcc_qupv3_wrap1_s3_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { @@ -647,7 +647,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = { .name = "gcc_qupv3_wrap1_s4_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { @@ -663,7 +663,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = { .name = "gcc_qupv3_wrap1_s5_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { @@ -679,7 +679,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = { .name = "gcc_qupv3_wrap1_s6_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = { @@ -695,7 +695,7 @@ static struct clk_init_data gcc_qupv3_wrap1_s7_clk_src_init = { .name = "gcc_qupv3_wrap1_s7_clk_src", .parent_data = gcc_parent_data_0, .num_parents = ARRAY_SIZE(gcc_parent_data_0), - .ops = &clk_rcg2_shared_ops, + .ops = &clk_rcg2_ops, }; static struct clk_rcg2 gcc_qupv3_wrap1_s7_clk_src = { diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c index 0559a33faf00..428cd99dcdcb 100644 --- a/drivers/clk/qcom/gcc-sm6350.c +++ b/drivers/clk/qcom/gcc-sm6350.c @@ -182,6 +182,14 @@ static const struct clk_parent_data gcc_parent_data_2_ao[] = { { .hw = &gpll0_out_odd.clkr.hw }, }; +static const struct parent_map gcc_parent_map_3[] = { + { P_BI_TCXO, 0 }, +}; + +static const struct clk_parent_data gcc_parent_data_3[] = { + { .fw_name = "bi_tcxo" }, +}; + static const struct parent_map gcc_parent_map_4[] = { { P_BI_TCXO, 0 }, { P_GPLL0_OUT_MAIN, 1 }, @@ -701,13 +709,12 @@ static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = { .cmd_rcgr = 0x3a0b0, .mnd_width = 0, .hid_width = 5, + .parent_map = gcc_parent_map_3, .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "gcc_ufs_phy_phy_aux_clk_src", - .parent_data = &(const struct clk_parent_data){ - .fw_name = "bi_tcxo", - }, - .num_parents = 1, + .parent_data = gcc_parent_data_3, + .num_parents = ARRAY_SIZE(gcc_parent_data_3), .ops = &clk_rcg2_ops, }, }; @@ -764,13 +771,12 @@ static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = { .cmd_rcgr = 0x1a034, .mnd_width = 0, .hid_width = 5, + .parent_map = gcc_parent_map_3, .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src, .clkr.hw.init = &(struct clk_init_data){ .name = "gcc_usb30_prim_mock_utmi_clk_src", - .parent_data = &(const struct clk_parent_data){ - .fw_name = "bi_tcxo", - }, - .num_parents = 1, + .parent_data = gcc_parent_data_3, + 
.num_parents = ARRAY_SIZE(gcc_parent_data_3), .ops = &clk_rcg2_ops, }, }; diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 0f21a8a767ac..bb6d39572d7f 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -290,6 +290,9 @@ static int gdsc_enable(struct generic_pm_domain *domain) */ udelay(1); + if (sc->flags & RETAIN_FF_ENABLE) + gdsc_retain_ff_on(sc); + /* Turn on HW trigger mode if supported */ if (sc->flags & HW_CTRL) { ret = gdsc_hwctrl(sc, true); @@ -306,9 +309,6 @@ static int gdsc_enable(struct generic_pm_domain *domain) udelay(1); } - if (sc->flags & RETAIN_FF_ENABLE) - gdsc_retain_ff_on(sc); - return 0; } @@ -418,13 +418,6 @@ static int gdsc_init(struct gdsc *sc) goto err_disable_supply; } - /* Turn on HW trigger mode if supported */ - if (sc->flags & HW_CTRL) { - ret = gdsc_hwctrl(sc, true); - if (ret < 0) - goto err_disable_supply; - } - /* * Make sure the retain bit is set if the GDSC is already on, * otherwise we end up turning off the GDSC and destroying all @@ -432,6 +425,14 @@ static int gdsc_init(struct gdsc *sc) */ if (sc->flags & RETAIN_FF_ENABLE) gdsc_retain_ff_on(sc); + + /* Turn on HW trigger mode if supported */ + if (sc->flags & HW_CTRL) { + ret = gdsc_hwctrl(sc, true); + if (ret < 0) + goto err_disable_supply; + } + } else if (sc->flags & ALWAYS_ON) { /* If ALWAYS_ON GDSCs are not ON, turn them ON */ gdsc_enable(&sc->pd); @@ -463,6 +464,23 @@ err_disable_supply: return ret; } +static void gdsc_pm_subdomain_remove(struct gdsc_desc *desc, size_t num) +{ + struct device *dev = desc->dev; + struct gdsc **scs = desc->scs; + int i; + + /* Remove subdomains */ + for (i = num - 1; i >= 0; i--) { + if (!scs[i]) + continue; + if (scs[i]->parent) + pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd); + else if (!IS_ERR_OR_NULL(dev->pm_domain)) + pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd); + } +} + int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *rcdev, struct regmap *regmap) { @@ -507,30 +525,27 @@ int gdsc_register(struct gdsc_desc *desc, if (!scs[i]) continue; if (scs[i]->parent) - pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd); + ret = pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd); else if (!IS_ERR_OR_NULL(dev->pm_domain)) - pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd); + ret = pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd); + if (ret) + goto err_pm_subdomain_remove; } return of_genpd_add_provider_onecell(dev->of_node, data); + +err_pm_subdomain_remove: + gdsc_pm_subdomain_remove(desc, i); + + return ret; } void gdsc_unregister(struct gdsc_desc *desc) { - int i; struct device *dev = desc->dev; - struct gdsc **scs = desc->scs; size_t num = desc->num; - /* Remove subdomains */ - for (i = 0; i < num; i++) { - if (!scs[i]) - continue; - if (scs[i]->parent) - pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd); - else if (!IS_ERR_OR_NULL(dev->pm_domain)) - pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd); - } + gdsc_pm_subdomain_remove(desc, num); of_genpd_del_provider(dev->of_node); } diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c index bc19a23e13f8..4d187d6aba73 100644 --- a/drivers/clk/qcom/mmcc-sdm660.c +++ b/drivers/clk/qcom/mmcc-sdm660.c @@ -2544,7 +2544,7 @@ static struct clk_branch video_core_clk = { static struct clk_branch video_subcore0_clk = { .halt_reg = 0x1048, - .halt_check = BRANCH_HALT, + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x1048, .enable_mask = BIT(0), diff 
--git a/drivers/clk/renesas/r9a07g043-cpg.c b/drivers/clk/renesas/r9a07g043-cpg.c index 0b56688ecbfc..ac60251d7284 100644 --- a/drivers/clk/renesas/r9a07g043-cpg.c +++ b/drivers/clk/renesas/r9a07g043-cpg.c @@ -14,6 +14,17 @@ #include "rzg2l-cpg.h" +/* Specific registers. */ +#define CPG_PL2SDHI_DSEL (0x218) + +/* Clock select configuration. */ +#define SEL_SDHI0 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 0, 2) +#define SEL_SDHI1 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 4, 2) + +/* Clock status configuration. */ +#define SEL_SDHI0_STS SEL_PLL_PACK(CPG_CLKSTATUS, 28, 1) +#define SEL_SDHI1_STS SEL_PLL_PACK(CPG_CLKSTATUS, 29, 1) + enum clk_ids { /* Core Clock Outputs exported to DT */ LAST_DT_CORE_CLK = R9A07G043_CLK_P0_DIV2, @@ -75,8 +86,12 @@ static const struct clk_div_table dtable_1_32[] = { /* Mux clock tables */ static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" }; +#ifdef CONFIG_ARM64 static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" }; -static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" }; +#endif +static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" }; + +static const u32 mtable_sdhi[] = { 1, 2, 3 }; static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = { /* External Clock Inputs */ @@ -120,11 +135,18 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = { DEF_DIV("P2", R9A07G043_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32), DEF_FIXED("M0", R9A07G043_CLK_M0, CLK_PLL3_DIV2_4, 1, 1), DEF_FIXED("ZT", R9A07G043_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1), +#ifdef CONFIG_ARM64 DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2), +#endif +#ifdef CONFIG_RISCV + DEF_FIXED("HP", R9A07G043_CLK_HP, CLK_PLL6_250, 1, 1), +#endif DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2), DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4), - DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, sel_shdi), - DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, sel_shdi), + DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi, + mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), + DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_sdhi, + mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G043_CLK_SD0, 1, 4), DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G043_CLK_SD1, 1, 4), }; diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c index 02a4fc41bb6e..77e1a3a4ef1e 100644 --- a/drivers/clk/renesas/r9a07g044-cpg.c +++ b/drivers/clk/renesas/r9a07g044-cpg.c @@ -15,6 +15,17 @@ #include "rzg2l-cpg.h" +/* Specific registers. */ +#define CPG_PL2SDHI_DSEL (0x218) + +/* Clock select configuration. */ +#define SEL_SDHI0 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 0, 2) +#define SEL_SDHI1 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 4, 2) + +/* Clock status configuration. 
*/ +#define SEL_SDHI0_STS SEL_PLL_PACK(CPG_CLKSTATUS, 28, 1) +#define SEL_SDHI1_STS SEL_PLL_PACK(CPG_CLKSTATUS, 29, 1) + enum clk_ids { /* Core Clock Outputs exported to DT */ LAST_DT_CORE_CLK = R9A07G054_CLK_DRP_A, @@ -95,9 +106,11 @@ static const struct clk_div_table dtable_16_128[] = { static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" }; static const char * const sel_pll5_4[] = { ".pll5_foutpostdiv", ".pll5_fout1ph0" }; static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" }; -static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" }; +static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" }; static const char * const sel_gpu2[] = { ".pll6", ".pll3_div2_2" }; +static const u32 mtable_sdhi[] = { 1, 2, 3 }; + static const struct { struct cpg_core_clk common[56]; #ifdef CONFIG_CLK_R9A07G054 @@ -163,8 +176,10 @@ static const struct { DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2, sel_pll6_2), DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2), DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4), - DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, sel_shdi), - DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, sel_shdi), + DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi, + mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), + DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_sdhi, + mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier), DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4), DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4), DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8), diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c index 473feb36a38f..af4f40c108d8 100644 --- a/drivers/clk/renesas/rzg2l-cpg.c +++ b/drivers/clk/renesas/rzg2l-cpg.c @@ -57,15 +57,37 @@ #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff) #define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff) +#define CPG_WEN_BIT BIT(16) + #define MAX_VCLK_FREQ (148500000) -struct sd_hw_data { +/** + * struct clk_hw_data - clock hardware data + * @hw: clock hw + * @conf: clock configuration (register offset, shift, width) + * @sconf: clock status configuration (register offset, shift, width) + * @priv: CPG private data structure + */ +struct clk_hw_data { struct clk_hw hw; u32 conf; + u32 sconf; struct rzg2l_cpg_priv *priv; }; -#define to_sd_hw_data(_hw) container_of(_hw, struct sd_hw_data, hw) +#define to_clk_hw_data(_hw) container_of(_hw, struct clk_hw_data, hw) + +/** + * struct sd_mux_hw_data - SD MUX clock hardware data + * @hw_data: clock hw data + * @mtable: clock mux table + */ +struct sd_mux_hw_data { + struct clk_hw_data hw_data; + const u32 *mtable; +}; + +#define to_sd_mux_hw_data(_hw) container_of(_hw, struct sd_mux_hw_data, hw_data) struct rzg2l_pll5_param { u32 pl5_fracin; @@ -119,6 +141,76 @@ static void rzg2l_cpg_del_clk_provider(void *data) of_clk_del_provider(data); } +/* Must be called in atomic context. 
*/ +static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf) +{ + u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf); + u32 off = GET_REG_OFFSET(conf); + u32 val; + + return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200); +} + +int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct clk_notifier_data *cnd = data; + struct clk_hw *hw = __clk_get_hw(cnd->clk); + struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); + struct rzg2l_cpg_priv *priv = clk_hw_data->priv; + u32 off = GET_REG_OFFSET(clk_hw_data->conf); + u32 shift = GET_SHIFT(clk_hw_data->conf); + const u32 clk_src_266 = 3; + unsigned long flags; + int ret; + + if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266)) + return NOTIFY_DONE; + + spin_lock_irqsave(&priv->rmw_lock, flags); + + /* + * As per the HW manual, we should not directly switch from 533 MHz to + * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz) + * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first, + * and then switch to the target setting (2’b01 (533 MHz) or 2’b10 + * (400 MHz)). + * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock + * switching register is prohibited. + * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and + * the index to value mapping is done by adding 1 to the index. + */ + + writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off); + + /* Wait for the update done. */ + ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf); + + spin_unlock_irqrestore(&priv->rmw_lock, flags); + + if (ret) + dev_err(priv->dev, "failed to switch to safe clk source\n"); + + return notifier_from_errno(ret); +} + +static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core, + struct rzg2l_cpg_priv *priv) +{ + struct notifier_block *nb; + + if (!core->notifier) + return 0; + + nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL); + if (!nb) + return -ENOMEM; + + nb->notifier_call = core->notifier; + + return clk_notifier_register(hw->clk, nb); +} + static struct clk * __init rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core, struct clk **clks, @@ -187,63 +279,44 @@ static int rzg2l_cpg_sd_clk_mux_determine_rate(struct clk_hw *hw, static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index) { - struct sd_hw_data *hwdata = to_sd_hw_data(hw); - struct rzg2l_cpg_priv *priv = hwdata->priv; - u32 off = GET_REG_OFFSET(hwdata->conf); - u32 shift = GET_SHIFT(hwdata->conf); - const u32 clk_src_266 = 2; - u32 msk, val, bitmask; + struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); + struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data); + struct rzg2l_cpg_priv *priv = clk_hw_data->priv; + u32 off = GET_REG_OFFSET(clk_hw_data->conf); + u32 shift = GET_SHIFT(clk_hw_data->conf); unsigned long flags; + u32 val; int ret; - /* - * As per the HW manual, we should not directly switch from 533 MHz to - * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz) - * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first, - * and then switch to the target setting (2’b01 (533 MHz) or 2’b10 - * (400 MHz)). - * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock - * switching register is prohibited. - * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and - * the index to value mapping is done by adding 1 to the index. 
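For context on the mux helpers introduced above, here is a minimal illustrative sketch (not the driver's code) of how the new mtable_sdhi table and the CPG write-enable convention combine when switching the SDHI source. It assumes the generic clk_mux_index_to_val() helper from include/linux/clk-provider.h and the per-field write-enable layout implied by CPG_WEN_BIT (BIT(16)):

#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/io.h>

/*
 * Illustrative only: parent index 0/1/2 (533/400/266 MHz) maps to register
 * value 1/2/3 through the mux table, and the upper 16 bits of the SEL_SDHI
 * register act as write-enable bits for the field being updated.
 */
static void example_sdhi_mux_write(void __iomem *base, u32 off, u32 shift, u8 index)
{
        static const u32 mtable_sdhi[] = { 1, 2, 3 };
        u32 val = clk_mux_index_to_val(mtable_sdhi, 0, index); /* 0 -> 1, 1 -> 2, 2 -> 3 */

        writel((BIT(16) | val) << shift, base + off);           /* BIT(16) == CPG_WEN_BIT */
}

Register value 3 (266 MHz) is the intermediate step the notifier above forces before any 533 <-> 400 MHz switch.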
- */ - bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16; - msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS; + val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index); + spin_lock_irqsave(&priv->rmw_lock, flags); - if (index != clk_src_266) { - writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off); - ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val, - !(val & msk), 10, - CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US); - if (ret) - goto unlock; - } + writel((CPG_WEN_BIT | val) << shift, priv->base + off); - writel(bitmask | ((index + 1) << shift), priv->base + off); + /* Wait for the update done. */ + ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf); - ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val, - !(val & msk), 10, - CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US); -unlock: spin_unlock_irqrestore(&priv->rmw_lock, flags); if (ret) - dev_err(priv->dev, "failed to switch clk source\n"); + dev_err(priv->dev, "Failed to switch parent\n"); return ret; } static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw) { - struct sd_hw_data *hwdata = to_sd_hw_data(hw); - struct rzg2l_cpg_priv *priv = hwdata->priv; - u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf)); + struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw); + struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data); + struct rzg2l_cpg_priv *priv = clk_hw_data->priv; + u32 val; - val >>= GET_SHIFT(hwdata->conf); - val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0); + val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf)); + val >>= GET_SHIFT(clk_hw_data->conf); + val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0); - return val ? val - 1 : 0; + return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val); } static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = { @@ -257,31 +330,40 @@ rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core, void __iomem *base, struct rzg2l_cpg_priv *priv) { - struct sd_hw_data *clk_hw_data; + struct sd_mux_hw_data *sd_mux_hw_data; struct clk_init_data init; struct clk_hw *clk_hw; int ret; - clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL); - if (!clk_hw_data) + sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL); + if (!sd_mux_hw_data) return ERR_PTR(-ENOMEM); - clk_hw_data->priv = priv; - clk_hw_data->conf = core->conf; + sd_mux_hw_data->hw_data.priv = priv; + sd_mux_hw_data->hw_data.conf = core->conf; + sd_mux_hw_data->hw_data.sconf = core->sconf; + sd_mux_hw_data->mtable = core->mtable; init.name = GET_SHIFT(core->conf) ? 
"sd1" : "sd0"; init.ops = &rzg2l_cpg_sd_clk_mux_ops; - init.flags = 0; + init.flags = core->flag; init.num_parents = core->num_parents; init.parent_names = core->parent_names; - clk_hw = &clk_hw_data->hw; + clk_hw = &sd_mux_hw_data->hw_data.hw; clk_hw->init = &init; ret = devm_clk_hw_register(priv->dev, clk_hw); if (ret) return ERR_PTR(ret); + ret = rzg2l_register_notifier(clk_hw, core, priv); + if (ret) { + dev_err(priv->dev, "Failed to register notifier for %s\n", + core->name); + return ERR_PTR(ret); + } + return clk_hw->clk; } @@ -289,7 +371,7 @@ static unsigned long rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params, unsigned long rate) { - unsigned long foutpostdiv_rate; + unsigned long foutpostdiv_rate, foutvco_rate; params->pl5_intin = rate / MEGA; params->pl5_fracin = div_u64(((u64)rate % MEGA) << 24, MEGA); @@ -298,10 +380,11 @@ rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param *params, params->pl5_postdiv2 = 1; params->pl5_spread = 0x16; - foutpostdiv_rate = - EXTAL_FREQ_IN_MEGA_HZ * MEGA / params->pl5_refdiv * - ((((params->pl5_intin << 24) + params->pl5_fracin)) >> 24) / - (params->pl5_postdiv1 * params->pl5_postdiv2); + foutvco_rate = div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ * MEGA, + (params->pl5_intin << 24) + params->pl5_fracin), + params->pl5_refdiv) >> 24; + foutpostdiv_rate = DIV_ROUND_CLOSEST_ULL(foutvco_rate, + params->pl5_postdiv1 * params->pl5_postdiv2); return foutpostdiv_rate; } diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h index aefa53a90059..6418961dc882 100644 --- a/drivers/clk/renesas/rzg2l-cpg.h +++ b/drivers/clk/renesas/rzg2l-cpg.h @@ -9,6 +9,8 @@ #ifndef __RENESAS_RZG2L_CPG_H__ #define __RENESAS_RZG2L_CPG_H__ +#include + #define CPG_SIPLL5_STBY (0x140) #define CPG_SIPLL5_CLK1 (0x144) #define CPG_SIPLL5_CLK3 (0x14C) @@ -19,7 +21,6 @@ #define CPG_PL2_DDIV (0x204) #define CPG_PL3A_DDIV (0x208) #define CPG_PL6_DDIV (0x210) -#define CPG_PL2SDHI_DSEL (0x218) #define CPG_CLKSTATUS (0x280) #define CPG_PL3_SSEL (0x408) #define CPG_PL6_SSEL (0x414) @@ -43,8 +44,6 @@ #define CPG_CLKSTATUS_SELSDHI0_STS BIT(28) #define CPG_CLKSTATUS_SELSDHI1_STS BIT(29) -#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 200 - /* n = 0/1/2 for PLL1/4/6 */ #define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n)) #define CPG_SAMPLL_CLK2(n) (0x08 + (16 * n)) @@ -69,9 +68,6 @@ #define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1) #define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1) -#define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2) -#define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2) - #define EXTAL_FREQ_IN_MEGA_HZ (24) /** @@ -90,10 +86,13 @@ struct cpg_core_clk { unsigned int mult; unsigned int type; unsigned int conf; + unsigned int sconf; const struct clk_div_table *dtable; + const u32 *mtable; const char * const *parent_names; - int flag; - int mux_flags; + notifier_fn_t notifier; + u32 flag; + u32 mux_flags; int num_parents; }; @@ -151,10 +150,11 @@ enum clk_types { .parent_names = _parent_names, \ .num_parents = ARRAY_SIZE(_parent_names), \ .mux_flags = CLK_MUX_READ_ONLY) -#define DEF_SD_MUX(_name, _id, _conf, _parent_names) \ - DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, \ +#define DEF_SD_MUX(_name, _id, _conf, _sconf, _parent_names, _mtable, _clk_flags, _notifier) \ + DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, .sconf = _sconf, \ .parent_names = _parent_names, \ - .num_parents = ARRAY_SIZE(_parent_names)) + .num_parents = ARRAY_SIZE(_parent_names), \ + .mtable = _mtable, .flag = _clk_flags, .notifier = _notifier) #define 
DEF_PLL5_FOUTPOSTDIV(_name, _id, _parent) \ DEF_TYPE(_name, _id, CLK_TYPE_SIPLL5, .parent = _parent) #define DEF_PLL5_4_MUX(_name, _id, _conf, _parent_names) \ @@ -269,4 +269,6 @@ extern const struct rzg2l_cpg_info r9a07g044_cpg_info; extern const struct rzg2l_cpg_info r9a07g054_cpg_info; extern const struct rzg2l_cpg_info r9a09g011_cpg_info; +int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event, void *data); + #endif diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index 1dc1e8998a9d..4ba27a938b64 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -204,7 +204,7 @@ PNAME(mux_aclk_peri_pre_p) = { "cpll_peri", "gpll_peri", "hdmiphy_peri" }; PNAME(mux_ref_usb3otg_src_p) = { "xin24m", - "clk_usb3otg_ref" }; + "clk_ref_usb3otg_src" }; PNAME(mux_xin24m_32k_p) = { "xin24m", "clk_rtc32k" }; PNAME(mux_mac2io_src_p) = { "clk_mac2io_src", diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index 0ff28938943f..d636a6b80830 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -206,6 +206,7 @@ static const struct clk_ops samsung_pll3000_clk_ops = { */ /* Maximum lock time can be 270 * PDIV cycles */ #define PLL35XX_LOCK_FACTOR (270) +#define PLL142XX_LOCK_FACTOR (150) #define PLL35XX_MDIV_MASK (0x3FF) #define PLL35XX_PDIV_MASK (0x3F) @@ -272,7 +273,11 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate, } /* Set PLL lock time. */ - writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR, + if (pll->type == pll_142xx) + writel_relaxed(rate->pdiv * PLL142XX_LOCK_FACTOR, + pll->lock_reg); + else + writel_relaxed(rate->pdiv * PLL35XX_LOCK_FACTOR, pll->lock_reg); /* Change PLL PMS values */ diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index bca4731b14ea..a95047319fcb 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -64,11 +64,11 @@ struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np, if (!ctx) panic("could not allocate clock provider context.\n"); + ctx->clk_data.num = nr_clks; for (i = 0; i < nr_clks; ++i) ctx->clk_data.hws[i] = ERR_PTR(-ENOENT); ctx->reg_base = base; - ctx->clk_data.num = nr_clks; spin_lock_init(&ctx->lock); return ctx; diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c index 8ef3cdeb7962..89d8bf4a30a2 100644 --- a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c +++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c @@ -412,19 +412,23 @@ static const struct clk_parent_data mmc0_mmc1_parents[] = { { .hw = &pll_periph0_2x_clk.common.hw }, { .hw = &pll_audio1_div2_clk.common.hw }, }; -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", + mmc0_mmc1_parents, 0x830, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", + mmc0_mmc1_parents, 0x834, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); static const struct clk_parent_data mmc2_parents[] = { { .fw_name = "hosc" }, @@ -433,12 +437,14 @@ static const struct clk_parent_data mmc2_parents[] = { { .hw 
= &pll_periph0_800M_clk.common.hw }, { .hw = &pll_audio1_div2_clk.common.hw }, }; -static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838, - 0, 4, /* M */ - 8, 2, /* P */ - 24, 3, /* mux */ - BIT(31), /* gate */ - 0); +static SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc2_parents, + 0x838, + 0, 4, /* M */ + 8, 2, /* P */ + 24, 3, /* mux */ + BIT(31), /* gate */ + 2, /* post-div */ + 0); static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws, 0x84c, BIT(0), 0); @@ -1360,7 +1366,7 @@ static int sun20i_d1_ccu_probe(struct platform_device *pdev) /* Enforce m1 = 0, m0 = 0 for PLL_AUDIO0 */ val = readl(reg + SUN20I_D1_PLL_AUDIO0_REG); - val &= ~BIT(1) | BIT(0); + val &= ~(BIT(1) | BIT(0)); writel(val, reg + SUN20I_D1_PLL_AUDIO0_REG); /* Force fanout-27M factor N to 0. */ diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c index 5f93b5526e13..52c1d5ebb0a1 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c @@ -436,7 +436,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc0_clk, "mmc0", mmc_parents, 0x830, 24, 2, /* mux */ BIT(31), /* gate */ 2, /* post-div */ - CLK_SET_RATE_NO_REPARENT); + 0); static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834, 0, 4, /* M */ @@ -444,7 +444,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc1_clk, "mmc1", mmc_parents, 0x834, 24, 2, /* mux */ BIT(31), /* gate */ 2, /* post-div */ - CLK_SET_RATE_NO_REPARENT); + 0); static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838, 0, 4, /* M */ @@ -452,7 +452,7 @@ static SUNXI_CCU_MP_WITH_MUX_GATE_POSTDIV(mmc2_clk, "mmc2", mmc_parents, 0x838, 24, 2, /* mux */ BIT(31), /* gate */ 2, /* post-div */ - CLK_SET_RATE_NO_REPARENT); + 0); static SUNXI_CCU_GATE(bus_mmc0_clk, "bus-mmc0", "ahb3", 0x84c, BIT(0), 0); static SUNXI_CCU_GATE(bus_mmc1_clk, "bus-mmc1", "ahb3", 0x84c, BIT(1), 0); diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h index 6e50f3728fb5..7d836a9fb3db 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.h +++ b/drivers/clk/sunxi-ng/ccu_mp.h @@ -52,6 +52,28 @@ struct ccu_mp { } \ } +#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE_POSTDIV(_struct, _name, _parents, \ + _reg, \ + _mshift, _mwidth, \ + _pshift, _pwidth, \ + _muxshift, _muxwidth, \ + _gate, _postdiv, _flags)\ + struct ccu_mp _struct = { \ + .enable = _gate, \ + .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \ + .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \ + .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \ + .fixed_post_div = _postdiv, \ + .common = { \ + .reg = _reg, \ + .features = CCU_FEATURE_FIXED_POSTDIV, \ + .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \ + _parents, \ + &ccu_mp_ops, \ + _flags), \ + } \ + } + #define SUNXI_CCU_MP_WITH_MUX_GATE(_struct, _name, _parents, _reg, \ _mshift, _mwidth, \ _pshift, _pwidth, \ diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 44e952105895..6119542a43a9 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -392,7 +392,8 @@ config ARM_GT_INITIAL_PRESCALER_VAL This affects CPU_FREQ max delta from the initial frequency. 
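Stepping back to the sunxi-ng ccu_mp.h addition above: with CCU_FEATURE_FIXED_POSTDIV set, the rate computed from the M and P register fields is divided once more by the fixed post-divider. A rough worked example, assuming the usual divide-by-M / shift-by-P behaviour of ccu_mp and a hypothetical 1.2 GHz parent with an effective M divider of 2 and P of 0:

        rate ~= parent / M_div / 2^P / post_div
             ~= 1,200 MHz / 2 / 1 / 2 = 300 MHz

so the same register settings now yield half the rate they did before the post-divider was modelled, which is the point of switching the mmc clocks to the new _POSTDIV macro.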
config ARM_TIMER_SP804 - bool "Support for Dual Timer SP804 module" if COMPILE_TEST + bool "Support for Dual Timer SP804 module" + depends on ARM || ARM64 || COMPILE_TEST depends on GENERIC_SCHED_CLOCK && HAVE_CLK select CLKSRC_MMIO select TIMER_OF if OF diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index 18de1f439ffd..6b4f58ee66e0 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -27,7 +27,8 @@ #include static struct clock_event_device __percpu *hv_clock_event; -static u64 hv_sched_clock_offset __ro_after_init; +/* Note: offset can hold negative values after hibernation. */ +static u64 hv_sched_clock_offset __read_mostly; /* * If false, we're using the old mechanism for stimer0 interrupts @@ -417,6 +418,17 @@ static void resume_hv_clock_tsc(struct clocksource *arg) hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64); } +/* + * Called during resume from hibernation, from overridden + * x86_platform.restore_sched_clock_state routine. This is to adjust offsets + * used to calculate time for hv tsc page based sched_clock, to account for + * time spent before hibernation. + */ +void hv_adj_sched_clock_offset(u64 offset) +{ + hv_sched_clock_offset -= offset; +} + #ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK static int hv_cs_enable(struct clocksource *cs) { diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c index d4350bb10b83..b603c25f3dfa 100644 --- a/drivers/clocksource/i8253.c +++ b/drivers/clocksource/i8253.c @@ -20,13 +20,6 @@ DEFINE_RAW_SPINLOCK(i8253_lock); EXPORT_SYMBOL(i8253_lock); -/* - * Handle PIT quirk in pit_shutdown() where zeroing the counter register - * restarts the PIT, negating the shutdown. On platforms with the quirk, - * platform specific code can set this to false. - */ -bool i8253_clear_counter_on_shutdown __ro_after_init = true; - #ifdef CONFIG_CLKSRC_I8253 /* * Since the PIT overflows every tick, its not very useful @@ -108,21 +101,45 @@ int __init clocksource_i8253_init(void) #endif #ifdef CONFIG_CLKEVT_I8253 +void clockevent_i8253_disable(void) +{ + guard(raw_spinlock_irqsave)(&i8253_lock); + + /* + * Writing the MODE register should stop the counter, according to + * the datasheet. This appears to work on real hardware (well, on + * modern Intel and AMD boxes; I didn't dig the Pegasos out of the + * shed). + * + * However, some virtual implementations differ, and the MODE change + * doesn't have any effect until either the counter is written (KVM + * in-kernel PIT) or the next interrupt (QEMU). And in those cases, + * it may not stop the *count*, only the interrupts. Although in + * the virt case, that probably doesn't matter, as the value of the + * counter will only be calculated on demand if the guest reads it; + * it's the interrupts which cause steal time. + * + * Hyper-V apparently has a bug where even in mode 0, the IRQ keeps + * firing repeatedly if the counter is running. But it *does* do the + * right thing when the MODE register is written. + * + * So: write the MODE and then load the counter, which ensures that + * the IRQ is stopped on those buggy virt implementations. And then + * write the MODE again, which is the right way to stop it. 
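One side note on the new clockevent_i8253_disable() helper above: it takes the PIT lock with the scope-based guard(raw_spinlock_irqsave)(&i8253_lock) form from linux/cleanup.h. A rough open-coded equivalent (illustrative sketch only, not the driver's code) looks like this:

#include <linux/i8253.h>
#include <linux/io.h>
#include <linux/spinlock.h>

/* Illustrative only: what the guard()-based locking roughly corresponds to. */
static void example_i8253_disable_open_coded(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&i8253_lock, flags);

        outb_p(0x30, PIT_MODE);   /* mode 0; stops the count on sane hardware */
        outb_p(0, PIT_CH0);       /* load counter low byte ... */
        outb_p(0, PIT_CH0);       /* ... and high byte, for buggy virt PITs */
        outb_p(0x30, PIT_MODE);   /* write MODE again, the architected way to stop */

        raw_spin_unlock_irqrestore(&i8253_lock, flags);
}

The guard releases i8253_lock automatically when the scope ends, which is why the helper body has no explicit unlock.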
+ */ + outb_p(0x30, PIT_MODE); + outb_p(0, PIT_CH0); + outb_p(0, PIT_CH0); + + outb_p(0x30, PIT_MODE); +} + static int pit_shutdown(struct clock_event_device *evt) { if (!clockevent_state_oneshot(evt) && !clockevent_state_periodic(evt)) return 0; - raw_spin_lock(&i8253_lock); - - outb_p(0x30, PIT_MODE); - - if (i8253_clear_counter_on_shutdown) { - outb_p(0, PIT_CH0); - outb_p(0, PIT_CH0); - } - - raw_spin_unlock(&i8253_lock); + clockevent_i8253_disable(); return 0; } diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index b3ae38f36720..39c70b5ac44c 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -114,6 +114,9 @@ static void gic_update_frequency(void *data) static int gic_starting_cpu(unsigned int cpu) { + /* Ensure the GIC counter is running */ + clear_gic_config(GIC_CONFIG_COUNTSTOP); + gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device)); return 0; } @@ -248,9 +251,6 @@ static int __init gic_clocksource_of_init(struct device_node *node) pr_warn("Unable to register clock notifier\n"); } - /* And finally start the counter */ - clear_gic_config(GIC_CONFIG_COUNTSTOP); - /* * It's safe to use the MIPS GIC timer as a sched clock source only if * its ticks are stable, which is true on either the platforms with diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c index db2841d0beb8..90c10f378df2 100644 --- a/drivers/clocksource/timer-stm32-lp.c +++ b/drivers/clocksource/timer-stm32-lp.c @@ -168,9 +168,7 @@ static int stm32_clkevent_lp_probe(struct platform_device *pdev) } if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) { - ret = device_init_wakeup(&pdev->dev, true); - if (ret) - goto out_clk_disable; + device_set_wakeup_capable(&pdev->dev, true); ret = dev_pm_set_wake_irq(&pdev->dev, irq); if (ret) diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index 632523c1232f..734920e8c575 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -688,9 +688,9 @@ subsys_initcall(dmtimer_percpu_timer_startup); static int __init dmtimer_percpu_quirk_init(struct device_node *np, u32 pa) { - struct device_node *arm_timer; + struct device_node *arm_timer __free(device_node) = + of_find_compatible_node(NULL, NULL, "arm,armv7-timer"); - arm_timer = of_find_compatible_node(NULL, NULL, "arm,armv7-timer"); if (of_device_is_available(arm_timer)) { pr_warn_once("ARM architected timer wrap issue i940 detected\n"); return 0; diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c index e2114bcf815a..37eb26abfd87 100644 --- a/drivers/comedi/comedi_fops.c +++ b/drivers/comedi/comedi_fops.c @@ -2402,6 +2402,18 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma) start += PAGE_SIZE; } + +#ifdef CONFIG_MMU + /* + * Leaving behind a partial mapping of a buffer we're about to + * drop is unsafe, see remap_pfn_range_notrack(). + * We need to zap the range here ourselves instead of relying + * on the automatic zapping in remap_pfn_range() because we call + * remap_pfn_range() in a loop. 
+ */ + if (retval) + zap_vma_ptes(vma, vma->vm_start, size); +#endif } if (retval == 0) { diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c index 951c23fa0369..88f1e7862614 100644 --- a/drivers/comedi/drivers/jr3_pci.c +++ b/drivers/comedi/drivers/jr3_pci.c @@ -87,6 +87,7 @@ struct jr3_pci_poll_delay { struct jr3_pci_dev_private { struct timer_list timer; struct comedi_device *dev; + bool timer_enable; }; union jr3_pci_single_range { @@ -596,10 +597,11 @@ static void jr3_pci_poll_dev(struct timer_list *t) delay = sub_delay.max; } } + if (devpriv->timer_enable) { + devpriv->timer.expires = jiffies + msecs_to_jiffies(delay); + add_timer(&devpriv->timer); + } spin_unlock_irqrestore(&dev->spinlock, flags); - - devpriv->timer.expires = jiffies + msecs_to_jiffies(delay); - add_timer(&devpriv->timer); } static struct jr3_pci_subdev_private * @@ -748,6 +750,7 @@ static int jr3_pci_auto_attach(struct comedi_device *dev, devpriv->dev = dev; timer_setup(&devpriv->timer, jr3_pci_poll_dev, 0); devpriv->timer.expires = jiffies + msecs_to_jiffies(1000); + devpriv->timer_enable = true; add_timer(&devpriv->timer); return 0; @@ -757,8 +760,12 @@ static void jr3_pci_detach(struct comedi_device *dev) { struct jr3_pci_dev_private *devpriv = dev->private; - if (devpriv) - del_timer_sync(&devpriv->timer); + if (devpriv) { + spin_lock_bh(&dev->spinlock); + devpriv->timer_enable = false; + spin_unlock_bh(&dev->spinlock); + timer_delete_sync(&devpriv->timer); + } comedi_pci_detach(dev); } diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c index c7af13aca36c..2f631729a870 100644 --- a/drivers/counter/microchip-tcb-capture.c +++ b/drivers/counter/microchip-tcb-capture.c @@ -369,6 +369,25 @@ static int mchp_tc_probe(struct platform_device *pdev) channel); } + /* Disable Quadrature Decoder and position measure */ + ret = regmap_update_bits(regmap, ATMEL_TC_BMR, ATMEL_TC_QDEN | ATMEL_TC_POSEN, 0); + if (ret) + return ret; + + /* Setup the period capture mode */ + ret = regmap_update_bits(regmap, ATMEL_TC_REG(priv->channel[0], CMR), + ATMEL_TC_WAVE | ATMEL_TC_ABETRG | ATMEL_TC_CMR_MASK | + ATMEL_TC_TCCLKS, + ATMEL_TC_CMR_MASK); + if (ret) + return ret; + + /* Enable clock and trigger counter */ + ret = regmap_write(regmap, ATMEL_TC_REG(priv->channel[0], CCR), + ATMEL_TC_CLKEN | ATMEL_TC_SWTRG); + if (ret) + return ret; + priv->tc_cfg = tcb_config; priv->regmap = regmap; counter->name = dev_name(&pdev->dev); diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c index 8439755559b2..537fe9b669f3 100644 --- a/drivers/counter/stm32-lptimer-cnt.c +++ b/drivers/counter/stm32-lptimer-cnt.c @@ -58,37 +58,43 @@ static int stm32_lptim_set_enable_state(struct stm32_lptim_cnt *priv, return 0; } + ret = clk_enable(priv->clk); + if (ret) + goto disable_cnt; + /* LP timer must be enabled before writing CMP & ARR */ ret = regmap_write(priv->regmap, STM32_LPTIM_ARR, priv->ceiling); if (ret) - return ret; + goto disable_clk; ret = regmap_write(priv->regmap, STM32_LPTIM_CMP, 0); if (ret) - return ret; + goto disable_clk; /* ensure CMP & ARR registers are properly written */ ret = regmap_read_poll_timeout(priv->regmap, STM32_LPTIM_ISR, val, (val & STM32_LPTIM_CMPOK_ARROK) == STM32_LPTIM_CMPOK_ARROK, 100, 1000); if (ret) - return ret; + goto disable_clk; ret = regmap_write(priv->regmap, STM32_LPTIM_ICR, STM32_LPTIM_CMPOKCF_ARROKCF); if (ret) - return ret; + goto disable_clk; - ret = clk_enable(priv->clk); - if (ret) { - 
regmap_write(priv->regmap, STM32_LPTIM_CR, 0); - return ret; - } priv->enabled = true; /* Start LP timer in continuous mode */ return regmap_update_bits(priv->regmap, STM32_LPTIM_CR, STM32_LPTIM_CNTSTRT, STM32_LPTIM_CNTSTRT); + +disable_clk: + clk_disable(priv->clk); +disable_cnt: + regmap_write(priv->regmap, STM32_LPTIM_CR, 0); + + return ret; } static int stm32_lptim_setup(struct stm32_lptim_cnt *priv, int enable) diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c index 9bf20a5d6bda..e752fc8cb190 100644 --- a/drivers/counter/stm32-timer-cnt.c +++ b/drivers/counter/stm32-timer-cnt.c @@ -195,11 +195,17 @@ static int stm32_count_enable_write(struct counter_device *counter, { struct stm32_timer_cnt *const priv = counter_priv(counter); u32 cr1; + int ret; if (enable) { regmap_read(priv->regmap, TIM_CR1, &cr1); - if (!(cr1 & TIM_CR1_CEN)) - clk_enable(priv->clk); + if (!(cr1 & TIM_CR1_CEN)) { + ret = clk_enable(priv->clk); + if (ret) { + dev_err(counter->parent, "Cannot enable clock %d\n", ret); + return ret; + } + } regmap_update_bits(priv->regmap, TIM_CR1, TIM_CR1_CEN, TIM_CR1_CEN); @@ -380,7 +386,11 @@ static int __maybe_unused stm32_timer_cnt_resume(struct device *dev) return ret; if (priv->enabled) { - clk_enable(priv->clk); + ret = clk_enable(priv->clk); + if (ret) { + dev_err(dev, "Cannot enable clock %d\n", ret); + return ret; + } /* Restore registers that may have been lost */ regmap_write(priv->regmap, TIM_SMCR, priv->bak.smcr); diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c index fb1cb1774674..b84e368a413f 100644 --- a/drivers/counter/ti-ecap-capture.c +++ b/drivers/counter/ti-ecap-capture.c @@ -576,8 +576,13 @@ static int ecap_cnt_resume(struct device *dev) { struct counter_device *counter_dev = dev_get_drvdata(dev); struct ecap_cnt_dev *ecap_dev = counter_priv(counter_dev); + int ret; - clk_enable(ecap_dev->clk); + ret = clk_enable(ecap_dev->clk); + if (ret) { + dev_err(dev, "Cannot enable clock %d\n", ret); + return ret; + } ecap_cnt_capture_set_evmode(counter_dev, ecap_dev->pm_ctx.ev_mode); diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 1bb2b90ebb21..72464e4132e2 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -635,7 +635,14 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) #endif #ifdef CONFIG_ACPI_CPPC_LIB -static u64 get_max_boost_ratio(unsigned int cpu) +/* + * get_max_boost_ratio: Computes the max_boost_ratio as the ratio + * between the highest_perf and the nominal_perf. + * + * Returns the max_boost_ratio for @cpu. Returns the CPPC nominal + * frequency via @nominal_freq if it is non-NULL pointer. 
+ */ +static u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq) { struct cppc_perf_caps perf_caps; u64 highest_perf, nominal_perf; @@ -658,6 +665,9 @@ static u64 get_max_boost_ratio(unsigned int cpu) nominal_perf = perf_caps.nominal_perf; + if (nominal_freq) + *nominal_freq = perf_caps.nominal_freq; + if (!highest_perf || !nominal_perf) { pr_debug("CPU%d: highest or nominal performance missing\n", cpu); return 0; @@ -670,8 +680,12 @@ static u64 get_max_boost_ratio(unsigned int cpu) return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf); } + #else -static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; } +static inline u64 get_max_boost_ratio(unsigned int cpu, u64 *nominal_freq) +{ + return 0; +} #endif static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) @@ -681,9 +695,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) struct acpi_cpufreq_data *data; unsigned int cpu = policy->cpu; struct cpuinfo_x86 *c = &cpu_data(cpu); + u64 max_boost_ratio, nominal_freq = 0; unsigned int valid_states = 0; unsigned int result = 0; - u64 max_boost_ratio; unsigned int i; #ifdef CONFIG_SMP static int blacklisted; @@ -833,16 +847,20 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) } freq_table[valid_states].frequency = CPUFREQ_TABLE_END; - max_boost_ratio = get_max_boost_ratio(cpu); + max_boost_ratio = get_max_boost_ratio(cpu, &nominal_freq); if (max_boost_ratio) { - unsigned int freq = freq_table[0].frequency; + unsigned int freq = nominal_freq; /* - * Because the loop above sorts the freq_table entries in the - * descending order, freq is the maximum frequency in the table. - * Assume that it corresponds to the CPPC nominal frequency and - * use it to set cpuinfo.max_freq. + * The loop above sorts the freq_table entries in the + * descending order. If ACPI CPPC has not advertised + * the nominal frequency (this is possible in CPPC + * revisions prior to 3), then use the first entry in + * the pstate table as a proxy for nominal frequency. 
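The boost ratio computed here is a fixed-point value scaled by SCHED_CAPACITY_SHIFT (10 in current kernels, so 1024 means a ratio of 1.0), and cpuinfo.max_freq becomes the chosen base frequency times that ratio shifted back down. A standalone arithmetic check, using made-up CPPC numbers and kHz purely as sample units:

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT 10		/* kernel value: 1024 == ratio 1.0 */

int main(void)
{
	/* Sample CPPC capabilities, purely illustrative. */
	uint64_t highest_perf = 36, nominal_perf = 24;
	uint64_t nominal_khz = 2400000;	/* sample nominal frequency in kHz */

	uint64_t ratio = (highest_perf << SCHED_CAPACITY_SHIFT) / nominal_perf;
	uint64_t max_khz = nominal_khz * ratio >> SCHED_CAPACITY_SHIFT;

	/* Prints "ratio 1536/1024, max_freq 3600000 kHz" */
	printf("ratio %llu/1024, max_freq %llu kHz\n",
	       (unsigned long long)ratio, (unsigned long long)max_khz);
	return 0;
}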
*/ + if (!freq) + freq = freq_table[0].frequency; + policy->cpuinfo.max_freq = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT; } else { /* diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 0050242d382e..cfa2e3f0e56b 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -422,6 +422,9 @@ static int cppc_get_cpu_power(struct device *cpu_dev, struct cppc_cpudata *cpu_data; policy = cpufreq_cpu_get_raw(cpu_dev->id); + if (!policy) + return -EINVAL; + cpu_data = policy->driver_data; perf_caps = &cpu_data->perf_caps; max_cap = arch_scale_cpu_capacity(cpu_dev->id); @@ -489,6 +492,9 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz, int step; policy = cpufreq_cpu_get_raw(cpu_dev->id); + if (!policy) + return -EINVAL; + cpu_data = policy->driver_data; perf_caps = &cpu_data->perf_caps; max_cap = arch_scale_cpu_capacity(cpu_dev->id); @@ -743,7 +749,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu) int ret; if (!policy) - return -ENODEV; + return 0; cpu_data = policy->driver_data; diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 04d89cf0d71d..3f35ce19c7b6 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -528,16 +528,18 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy) EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch); static unsigned int __resolve_freq(struct cpufreq_policy *policy, - unsigned int target_freq, unsigned int relation) + unsigned int target_freq, + unsigned int min, unsigned int max, + unsigned int relation) { unsigned int idx; - target_freq = clamp_val(target_freq, policy->min, policy->max); + target_freq = clamp_val(target_freq, min, max); if (!policy->freq_table) return target_freq; - idx = cpufreq_frequency_table_target(policy, target_freq, relation); + idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation); policy->cached_resolved_idx = idx; policy->cached_target_freq = target_freq; return policy->freq_table[idx].frequency; @@ -557,7 +559,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy, unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy, unsigned int target_freq) { - return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE); + unsigned int min = READ_ONCE(policy->min); + unsigned int max = READ_ONCE(policy->max); + + /* + * If this function runs in parallel with cpufreq_set_policy(), it may + * read policy->min before the update and policy->max after the update + * or the other way around, so there is no ordering guarantee. + * + * Resolve this by always honoring the max (in case it comes from + * thermal throttling or similar). + */ + if (unlikely(min > max)) + min = max; + + return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE); } EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq); @@ -2283,7 +2299,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, if (cpufreq_disabled()) return -ENODEV; - target_freq = __resolve_freq(policy, target_freq, relation); + target_freq = __resolve_freq(policy, target_freq, policy->min, + policy->max, relation); pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", policy->cpu, target_freq, relation, old_target_freq); @@ -2573,11 +2590,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, * Resolve policy min/max to available frequencies. It ensures * no frequency resolution will neither overshoot the requested maximum * nor undershoot the requested minimum. 
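Because policy->min and policy->max are written as two separate stores, a concurrent reader can observe the old value of one and the new value of the other; the resolve path above therefore snapshots both with READ_ONCE() and, when they cross, trusts the max. A small sketch of that rule, with a hypothetical helper name:

/* Illustrative helper mirroring the clamp rule used above. */
static unsigned int clamp_to_limits(unsigned int target,
				    unsigned int min, unsigned int max)
{
	/*
	 * A torn read across a policy update may yield min > max; honor the
	 * max in that case, since it may come from thermal throttling.
	 */
	if (min > max)
		min = max;

	if (target < min)
		return min;
	if (target > max)
		return max;
	return target;
}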
+ * + * Avoid storing intermediate values in policy->max or policy->min and + * compiler optimizations around them because they may be accessed + * concurrently by cpufreq_driver_resolve_freq() during the update. */ - policy->min = new_data.min; - policy->max = new_data.max; - policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L); - policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H); + WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, + new_data.min, new_data.max, + CPUFREQ_RELATION_H)); + new_data.min = __resolve_freq(policy, new_data.min, new_data.min, + new_data.max, CPUFREQ_RELATION_L); + WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min); + trace_cpu_frequency_limits(policy); policy->cached_target_freq = UINT_MAX; @@ -2673,10 +2697,18 @@ EXPORT_SYMBOL(cpufreq_update_policy); */ void cpufreq_update_limits(unsigned int cpu) { + struct cpufreq_policy *policy; + + policy = cpufreq_cpu_get(cpu); + if (!policy) + return; + if (cpufreq_driver->update_limits) cpufreq_driver->update_limits(cpu); else cpufreq_update_policy(cpu); + + cpufreq_cpu_put(policy); } EXPORT_SYMBOL_GPL(cpufreq_update_limits); diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 85da677c43d6..c8bca3e77bce 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -145,7 +145,23 @@ unsigned int dbs_update(struct cpufreq_policy *policy) time_elapsed = update_time - j_cdbs->prev_update_time; j_cdbs->prev_update_time = update_time; - idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; + /* + * cur_idle_time could be smaller than j_cdbs->prev_cpu_idle if + * it's obtained from get_cpu_idle_time_jiffy() when NOHZ is + * off, where idle_time is calculated by the difference between + * time elapsed in jiffies and "busy time" obtained from CPU + * statistics. If a CPU is 100% busy, the time elapsed and busy + * time should grow with the same amount in two consecutive + * samples, but in practice there could be a tiny difference, + * making the accumulated idle time decrease sometimes. Hence, + * in this case, idle_time should be regarded as 0 in order to + * make the further process correct. + */ + if (cur_idle_time > j_cdbs->prev_cpu_idle) + idle_time = cur_idle_time - j_cdbs->prev_cpu_idle; + else + idle_time = 0; + j_cdbs->prev_cpu_idle = cur_idle_time; if (ignore_nice) { @@ -162,7 +178,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy) * calls, so the previous load value can be used then. */ load = j_cdbs->prev_load; - } else if (unlikely((int)idle_time > 2 * sampling_rate && + } else if (unlikely(idle_time > 2 * sampling_rate && j_cdbs->prev_load)) { /* * If the CPU had gone completely idle and a task has @@ -189,30 +205,15 @@ unsigned int dbs_update(struct cpufreq_policy *policy) load = j_cdbs->prev_load; j_cdbs->prev_load = 0; } else { - if (time_elapsed >= idle_time) { + if (time_elapsed > idle_time) load = 100 * (time_elapsed - idle_time) / time_elapsed; - } else { - /* - * That can happen if idle_time is returned by - * get_cpu_idle_time_jiffy(). In that case - * idle_time is roughly equal to the difference - * between time_elapsed and "busy time" obtained - * from CPU statistics. Then, the "busy time" - * can end up being greater than time_elapsed - * (for example, if jiffies_64 and the CPU - * statistics are updated by different CPUs), - * so idle_time may in fact be negative. 
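The governor change in cpufreq_governor.c boils down to two guards: the idle-time delta must not underflow when the jiffy-based idle counter regresses, and a sample whose idle time covers the whole interval counts as zero load (while a regressed counter, clamped to zero idle time, counts as fully busy). In outline, with hypothetical helper names:

#include <linux/types.h>

/* Illustrative helpers mirroring the updated sampling logic. */
static unsigned int idle_delta(u64 cur_idle, u64 prev_idle)
{
	/* The jiffy-based idle counter can regress slightly; clamp to 0. */
	return cur_idle > prev_idle ? (unsigned int)(cur_idle - prev_idle) : 0;
}

static unsigned int sample_load(unsigned int time_elapsed, unsigned int idle_time)
{
	/* Idle time shorter than the interval: busy fraction in percent. */
	if (time_elapsed > idle_time)
		return 100 * (time_elapsed - idle_time) / time_elapsed;
	return 0;
}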
That - * means, though, that the CPU was busy all - * the time (on the rough average) during the - * last sampling interval and 100 can be - * returned as the load. - */ - load = (int)idle_time < 0 ? 100 : 0; - } + else + load = 0; + j_cdbs->prev_load = load; } - if (unlikely((int)idle_time > 2 * sampling_rate)) { + if (unlikely(idle_time > 2 * sampling_rate)) { unsigned int periods = idle_time / sampling_rate; if (periods < idle_periods) diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c index 3dba4e99df71..339f94a69125 100644 --- a/drivers/cpufreq/cpufreq_interactive.c +++ b/drivers/cpufreq/cpufreq_interactive.c @@ -300,6 +300,7 @@ static unsigned int choose_freq(struct interactive_cpu *icpu, */ index = cpufreq_frequency_table_target(policy, loadadjfreq / tl, + policy->min, policy->max, CPUFREQ_RELATION_L); freq = freq_table[index].frequency; @@ -313,7 +314,7 @@ static unsigned int choose_freq(struct interactive_cpu *icpu, /* Find highest frequency that is less than freqmax */ index = cpufreq_frequency_table_target(policy, - freqmax - 1, CPUFREQ_RELATION_H); + freqmax - 1, policy->min, policy->max, CPUFREQ_RELATION_H); freq = freq_table[index].frequency; @@ -335,7 +336,7 @@ static unsigned int choose_freq(struct interactive_cpu *icpu, /* Find lowest frequency that is higher than freqmin */ index = cpufreq_frequency_table_target(policy, - freqmin + 1, CPUFREQ_RELATION_L); + freqmin + 1, policy->min, policy->max, CPUFREQ_RELATION_L); freq = freq_table[index].frequency; @@ -437,6 +438,7 @@ static void eval_target_freq(struct interactive_cpu *icpu) icpu->loc_hispeed_val_time = now; index = cpufreq_frequency_table_target(policy, new_freq, + policy->min, policy->max, CPUFREQ_RELATION_L); new_freq = freq_table[index].frequency; diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index c52d19d67557..65cbd5ecbaf8 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c @@ -77,7 +77,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy, return freq_next; } - index = cpufreq_frequency_table_target(policy, freq_next, relation); + index = cpufreq_frequency_table_target(policy, freq_next, policy->min, + policy->max, relation); freq_req = freq_table[index].frequency; freq_reduc = freq_req * od_tuners->powersave_bias / 1000; freq_avg = freq_req - freq_reduc; diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index 67e56cf638ef..190f8aa7ad02 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -116,8 +116,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy) EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify); int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) + unsigned int target_freq, unsigned int min, + unsigned int max, unsigned int relation) { struct cpufreq_frequency_table optimal = { .driver_data = ~0, @@ -148,7 +148,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, cpufreq_for_each_valid_entry_idx(pos, table, i) { freq = pos->frequency; - if ((freq < policy->min) || (freq > policy->max)) + if (freq < min || freq > max) continue; if (freq == target_freq) { optimal.driver_data = i; diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index afc59b292153..63cae4037deb 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ 
-154,7 +154,9 @@ static int __init cpufreq_init(void) ret = cpufreq_register_driver(&loongson2_cpufreq_driver); - if (!ret && !nowait) { + if (ret) { + platform_driver_unregister(&platform_driver); + } else if (!nowait) { saved_cpu_wait = cpu_wait; cpu_wait = loongson2_cpu_wait; } diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c index 62f5a9d64e8f..7c67698057ac 100644 --- a/drivers/cpufreq/mediatek-cpufreq-hw.c +++ b/drivers/cpufreq/mediatek-cpufreq-hw.c @@ -62,7 +62,7 @@ mtk_cpufreq_get_cpu_power(struct device *cpu_dev, unsigned long *uW, policy = cpufreq_cpu_get_raw(cpu_dev->id); if (!policy) - return 0; + return -EINVAL; data = policy->driver_data; diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c index c6bdfc308e99..9cef71528076 100644 --- a/drivers/cpufreq/s3c64xx-cpufreq.c +++ b/drivers/cpufreq/s3c64xx-cpufreq.c @@ -24,6 +24,7 @@ struct s3c64xx_dvfs { unsigned int vddarm_max; }; +#ifdef CONFIG_REGULATOR static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = { [0] = { 1000000, 1150000 }, [1] = { 1050000, 1150000 }, @@ -31,6 +32,7 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = { [3] = { 1200000, 1350000 }, [4] = { 1300000, 1350000 }, }; +#endif static struct cpufreq_frequency_table s3c64xx_freq_table[] = { { 0, 0, 66000 }, @@ -51,15 +53,16 @@ static struct cpufreq_frequency_table s3c64xx_freq_table[] = { static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) { - struct s3c64xx_dvfs *dvfs; - unsigned int old_freq, new_freq; + unsigned int new_freq = s3c64xx_freq_table[index].frequency; int ret; +#ifdef CONFIG_REGULATOR + struct s3c64xx_dvfs *dvfs; + unsigned int old_freq; + old_freq = clk_get_rate(policy->clk) / 1000; - new_freq = s3c64xx_freq_table[index].frequency; dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[index].driver_data]; -#ifdef CONFIG_REGULATOR if (vddarm && new_freq > old_freq) { ret = regulator_set_voltage(vddarm, dvfs->vddarm_min, diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 079940c69ee0..e4989764efe2 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -33,11 +33,17 @@ static const struct scmi_perf_proto_ops *perf_ops; static unsigned int scmi_cpufreq_get_rate(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); - struct scmi_data *priv = policy->driver_data; + struct cpufreq_policy *policy; + struct scmi_data *priv; unsigned long rate; int ret; + policy = cpufreq_cpu_get_raw(cpu); + if (unlikely(!policy)) + return 0; + + priv = policy->driver_data; + ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false); if (ret) return 0; diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index ac719aca49b7..217073faf60c 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -29,9 +29,16 @@ static struct scpi_ops *scpi_ops; static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) { - struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); - struct scpi_data *priv = policy->driver_data; - unsigned long rate = clk_get_rate(priv->clk); + struct cpufreq_policy *policy; + struct scpi_data *priv; + unsigned long rate; + + policy = cpufreq_cpu_get_raw(cpu); + if (unlikely(!policy)) + return 0; + + priv = policy->driver_data; + rate = clk_get_rate(priv->clk); return rate / 1000; } @@ -39,8 +46,9 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) static int scpi_cpufreq_set_target(struct cpufreq_policy *policy, 
unsigned int index) { - u64 rate = policy->freq_table[index].frequency * 1000; + unsigned long freq_khz = policy->freq_table[index].frequency; struct scpi_data *priv = policy->driver_data; + unsigned long rate = freq_khz * 1000; int ret; ret = clk_set_rate(priv->clk, rate); @@ -48,7 +56,7 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) if (ret) return ret; - if (clk_get_rate(priv->clk) != rate) + if (clk_get_rate(priv->clk) / 1000 != freq_khz) return -EIO; return 0; diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c index 6c88827f4e62..1d6b54303723 100644 --- a/drivers/cpufreq/tegra186-cpufreq.c +++ b/drivers/cpufreq/tegra186-cpufreq.c @@ -73,11 +73,18 @@ static int tegra186_cpufreq_init(struct cpufreq_policy *policy) { struct tegra186_cpufreq_data *data = cpufreq_get_driver_data(); unsigned int cluster = data->cpus[policy->cpu].bpmp_cluster_id; + u32 cpu; policy->freq_table = data->clusters[cluster].table; policy->cpuinfo.transition_latency = 300 * 1000; policy->driver_data = NULL; + /* set same policy for all cpus in a cluster */ + for (cpu = 0; cpu < ARRAY_SIZE(tegra186_cpus); cpu++) { + if (data->cpus[cpu].bpmp_cluster_id == cluster) + cpumask_set_cpu(cpu, policy->cpus); + } + return 0; } diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c index af7320a768d2..963d5f171ef7 100644 --- a/drivers/cpuidle/cpuidle-riscv-sbi.c +++ b/drivers/cpuidle/cpuidle-riscv-sbi.c @@ -540,12 +540,12 @@ static int sbi_cpuidle_probe(struct platform_device *pdev) int cpu, ret; struct cpuidle_driver *drv; struct cpuidle_device *dev; - struct device_node *np, *pds_node; + struct device_node *pds_node; /* Detect OSI support based on CPU DT nodes */ sbi_cpuidle_use_osi = true; for_each_possible_cpu(cpu) { - np = of_cpu_device_node_get(cpu); + struct device_node *np __free(device_node) = of_cpu_device_node_get(cpu); if (np && of_find_property(np, "power-domains", NULL) && of_find_property(np, "power-domain-names", NULL)) { diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index c4922684f305..4edac724983a 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -249,8 +249,19 @@ again: * This can deal with workloads that have long pauses interspersed * with sporadic activity with a bunch of short pauses. */ - if ((divisor * 4) <= INTERVALS * 3) + if (divisor * 4 <= INTERVALS * 3) { + /* + * If there are sufficiently many data points still under + * consideration after the outliers have been eliminated, + * returning without a prediction would be a mistake because it + * is likely that the next interval will not exceed the current + * maximum, so return the latter in that case. + */ + if (divisor >= INTERVALS / 2) + return max; + return UINT_MAX; + } thresh = max - 1; goto again; diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c index a84b657598c6..51738c730717 100644 --- a/drivers/crypto/atmel-sha204a.c +++ b/drivers/crypto/atmel-sha204a.c @@ -107,7 +107,12 @@ static int atmel_sha204a_probe(struct i2c_client *client, i2c_priv->hwrng.name = dev_name(&client->dev); i2c_priv->hwrng.read = atmel_sha204a_rng_read; - i2c_priv->hwrng.quality = 1024; + + /* + * According to review by Bill Cox [1], this HWRNG has very low entropy. 
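For context on the atmel-sha204a change: hwrng.quality is the driver's estimate of true entropy in bits per 1024 bits of output, so lowering it from 1024 to 1 tells the hwrng core to credit the device with almost no entropy. A skeletal registration showing where the field lives; the names are illustrative, not the driver's.

#include <linux/hw_random.h>

static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	/* Fill buf from the device here; return bytes read or -errno. */
	return 0;
}

static struct hwrng example_rng = {
	.name = "example-rng",
	.read = example_rng_read,
	.quality = 1,	/* ~1 bit of entropy per 1024 bits of output */
};

/* Registered from probe() with devm_hwrng_register(dev, &example_rng). */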
+ * [1] https://www.metzdowd.com/pipermail/cryptography/2014-December/023858.html + */ + i2c_priv->hwrng.quality = 1; ret = devm_hwrng_register(&client->dev, &i2c_priv->hwrng); if (ret) diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c index c8c799428fe0..9a3e4d76acf7 100644 --- a/drivers/crypto/bcm/cipher.c +++ b/drivers/crypto/bcm/cipher.c @@ -2415,6 +2415,7 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, static int ahash_hmac_init(struct ahash_request *req) { + int ret; struct iproc_reqctx_s *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); @@ -2424,7 +2425,9 @@ static int ahash_hmac_init(struct ahash_request *req) flow_log("ahash_hmac_init()\n"); /* init the context as a hash */ - ahash_init(req); + ret = ahash_init(req); + if (ret) + return ret; if (!spu_no_incr_hash(ctx)) { /* SPU-M can do incr hashing but needs sw for outer HMAC */ diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c index 51b48b57266a..7881846651d1 100644 --- a/drivers/crypto/caam/caampkc.c +++ b/drivers/crypto/caam/caampkc.c @@ -979,7 +979,7 @@ err: return -ENOMEM; } -static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx, +static int caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx, struct rsa_key *raw_key) { struct caam_rsa_key *rsa_key = &ctx->key; @@ -988,7 +988,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx, rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz); if (!rsa_key->p) - return; + return -ENOMEM; rsa_key->p_sz = p_sz; rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz); @@ -1021,7 +1021,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx, rsa_key->priv_form = FORM3; - return; + return 0; free_dq: kfree_sensitive(rsa_key->dq); @@ -1035,6 +1035,7 @@ free_q: kfree_sensitive(rsa_key->q); free_p: kfree_sensitive(rsa_key->p); + return -ENOMEM; } static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, @@ -1080,7 +1081,9 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, rsa_key->e_sz = raw_key.e_sz; rsa_key->n_sz = raw_key.n_sz; - caam_rsa_set_priv_key_form(ctx, &raw_key); + ret = caam_rsa_set_priv_key_form(ctx, &raw_key); + if (ret) + goto err; return 0; diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index c36f27376d7e..df12b562ad3b 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -115,12 +115,12 @@ int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) qm_fd_addr_set64(&fd, addr); do { + refcount_inc(&req->drv_ctx->refcnt); ret = qman_enqueue(req->drv_ctx->req_fq, &fd); - if (likely(!ret)) { - refcount_inc(&req->drv_ctx->refcnt); + if (likely(!ret)) return 0; - } + refcount_dec(&req->drv_ctx->refcnt); if (ret != -EBUSY) break; num_retries++; @@ -765,7 +765,7 @@ int caam_qi_init(struct platform_device *caam_pdev) caam_debugfs_qi_init(ctrlpriv); - err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv); + err = devm_add_action_or_reset(qidev, caam_qi_shutdown, qidev); if (err) return err; diff --git a/drivers/crypto/cavium/cpt/cptpf_main.c b/drivers/crypto/cavium/cpt/cptpf_main.c index 6872ac344001..54de869e5374 100644 --- a/drivers/crypto/cavium/cpt/cptpf_main.c +++ b/drivers/crypto/cavium/cpt/cptpf_main.c @@ -44,7 +44,7 @@ static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask, dev_err(dev, "Cores still busy %llx", coremask); grp = cpt_read_csr64(cpt->reg_base, 
CPTX_PF_EXEC_BUSY(0)); - if (timeout--) + if (!timeout--) break; udelay(CSR_DELAY); @@ -302,6 +302,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) ret = do_cpt_init(cpt, mcode); if (ret) { + dma_free_coherent(&cpt->pdev->dev, mcode->code_size, + mcode->code, mcode->phys_base); dev_err(dev, "do_cpt_init failed with ret: %d\n", ret); goto fw_release; } @@ -394,7 +396,7 @@ static void cpt_disable_all_cores(struct cpt_device *cpt) dev_err(dev, "Cores still busy"); grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0)); - if (timeout--) + if (!timeout--) break; udelay(CSR_DELAY); diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index 55411b494d69..32c0b2744654 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -179,14 +179,17 @@ static bool sp_pci_is_master(struct sp_device *sp) pdev_new = to_pci_dev(dev_new); pdev_cur = to_pci_dev(dev_cur); - if (pdev_new->bus->number < pdev_cur->bus->number) - return true; + if (pci_domain_nr(pdev_new->bus) != pci_domain_nr(pdev_cur->bus)) + return pci_domain_nr(pdev_new->bus) < pci_domain_nr(pdev_cur->bus); - if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn)) - return true; + if (pdev_new->bus->number != pdev_cur->bus->number) + return pdev_new->bus->number < pdev_cur->bus->number; - if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn)) - return true; + if (PCI_SLOT(pdev_new->devfn) != PCI_SLOT(pdev_cur->devfn)) + return PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn); + + if (PCI_FUNC(pdev_new->devfn) != PCI_FUNC(pdev_cur->devfn)) + return PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn); return false; } diff --git a/drivers/crypto/hisilicon/hpre/hpre_main.c b/drivers/crypto/hisilicon/hpre/hpre_main.c index e9abb66773fe..b0596564d27d 100644 --- a/drivers/crypto/hisilicon/hpre/hpre_main.c +++ b/drivers/crypto/hisilicon/hpre/hpre_main.c @@ -1281,11 +1281,15 @@ static u32 hpre_get_hw_err_status(struct hisi_qm *qm) static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { - u32 nfe; - writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); - nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); - writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); +} + +static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type) +{ + u32 nfe_mask; + + nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); + writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB); } static void hpre_open_axi_master_ooo(struct hisi_qm *qm) @@ -1299,6 +1303,27 @@ static void hpre_open_axi_master_ooo(struct hisi_qm *qm) qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); } +static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm) +{ + u32 err_status; + + err_status = hpre_get_hw_err_status(qm); + if (err_status) { + if (err_status & qm->err_info.ecc_2bits_mask) + qm->err_status.is_dev_ecc_mbit = true; + hpre_log_hw_error(qm, err_status); + + if (err_status & qm->err_info.dev_reset_mask) { + /* Disable the same error reporting until device is recovered. 
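The helper being added here follows a pattern this series repeats for SEC and ZIP further below: latch the ECC flag, log, and either mask further reporting of a reset-worthy error or clear it. An illustrative consolidation of that shape, assuming enum acc_err_result and struct hisi_qm come from the shared <linux/hisi_acc_qm.h> header after this series:

#include <linux/hisi_acc_qm.h>

/* Sketch only: the per-device get_err_result() handlers passed in here
 * stand in for hpre_get_err_result() and its SEC/ZIP counterparts. */
static enum acc_err_result example_get_err_result(struct hisi_qm *qm,
		u32 (*get_status)(struct hisi_qm *qm),
		void (*log_err)(struct hisi_qm *qm, u32 err),
		void (*disable_report)(struct hisi_qm *qm, u32 err),
		void (*clear_status)(struct hisi_qm *qm, u32 err))
{
	u32 err_status = get_status(qm);

	if (!err_status)
		return ACC_ERR_RECOVERED;

	if (err_status & qm->err_info.ecc_2bits_mask)
		qm->err_status.is_dev_ecc_mbit = true;
	log_err(qm, err_status);

	if (err_status & qm->err_info.dev_reset_mask) {
		/* Stop re-reporting this error until the reset completes. */
		disable_report(qm, err_status);
		return ACC_ERR_NEED_RESET;
	}

	clear_status(qm, err_status);
	return ACC_ERR_RECOVERED;
}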
*/ + hpre_disable_error_report(qm, err_status); + return ACC_ERR_NEED_RESET; + } + hpre_clear_hw_err_status(qm, err_status); + } + + return ACC_ERR_RECOVERED; +} + static void hpre_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; @@ -1325,12 +1350,12 @@ static const struct hisi_qm_err_ini hpre_err_ini = { .hw_err_disable = hpre_hw_error_disable, .get_dev_hw_err_status = hpre_get_hw_err_status, .clear_dev_hw_err_status = hpre_clear_hw_err_status, - .log_dev_hw_err = hpre_log_hw_error, .open_axi_master_ooo = hpre_open_axi_master_ooo, .open_sva_prefetch = hpre_open_sva_prefetch, .close_sva_prefetch = hpre_close_sva_prefetch, .show_last_dfx_regs = hpre_show_last_dfx_regs, .err_info_init = hpre_err_info_init, + .get_err_result = hpre_get_err_result, }; static int hpre_pf_probe_init(struct hpre *hpre) diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c index df14727f6e71..a9bf65da30a6 100644 --- a/drivers/crypto/hisilicon/qm.c +++ b/drivers/crypto/hisilicon/qm.c @@ -281,12 +281,6 @@ enum vft_type { SHAPER_VFT, }; -enum acc_err_result { - ACC_ERR_NONE, - ACC_ERR_NEED_RESET, - ACC_ERR_RECOVERED, -}; - enum qm_alg_type { ALG_TYPE_0, ALG_TYPE_1, @@ -1503,22 +1497,25 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) { - u32 error_status, tmp; + u32 error_status; - /* read err sts */ - tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); - error_status = qm->error_mask & tmp; - - if (error_status) { + error_status = qm_get_hw_error_status(qm); + if (error_status & qm->error_mask) { if (error_status & QM_ECC_MBIT) qm->err_status.is_qm_ecc_mbit = true; qm_log_hw_error(qm, error_status); - if (error_status & qm->err_info.qm_reset_mask) + if (error_status & qm->err_info.qm_reset_mask) { + /* Disable the same error reporting until device is recovered. */ + writel(qm->err_info.nfe & (~error_status), + qm->io_base + QM_RAS_NFE_ENABLE); return ACC_ERR_NEED_RESET; + } + /* Clear error source if not need reset. 
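Stepping back to the cavium/cpt change earlier in this hunk series: the polling loops there must break once the countdown reaches zero, which is what flipping "if (timeout--)" to "if (!timeout--)" achieves. A minimal sketch of the corrected loop shape; cpt_read_csr64(), CPTX_PF_EXEC_BUSY and CSR_DELAY are the driver's own symbols, reused only to show the structure.

#include <linux/delay.h>
#include <linux/errno.h>

static int example_wait_cores_idle(struct cpt_device *cpt, u64 coremask)
{
	int timeout = 100;
	u64 grp;

	do {
		grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
		if (!(grp & coremask))
			return 0;		/* cores went idle */
		if (!timeout--)
			return -ETIMEDOUT;	/* budget exhausted: stop */
		udelay(CSR_DELAY);
	} while (true);
}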
*/ writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); + writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE); } return ACC_ERR_RECOVERED; @@ -3868,30 +3865,12 @@ EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) { - u32 err_sts; - - if (!qm->err_ini->get_dev_hw_err_status) { - dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); + if (!qm->err_ini->get_err_result) { + dev_err(&qm->pdev->dev, "Device doesn't support reset!\n"); return ACC_ERR_NONE; } - /* get device hardware error status */ - err_sts = qm->err_ini->get_dev_hw_err_status(qm); - if (err_sts) { - if (err_sts & qm->err_info.ecc_2bits_mask) - qm->err_status.is_dev_ecc_mbit = true; - - if (qm->err_ini->log_dev_hw_err) - qm->err_ini->log_dev_hw_err(qm, err_sts); - - if (err_sts & qm->err_info.dev_reset_mask) - return ACC_ERR_NEED_RESET; - - if (qm->err_ini->clear_dev_hw_err_status) - qm->err_ini->clear_dev_hw_err_status(qm, err_sts); - } - - return ACC_ERR_RECOVERED; + return qm->err_ini->get_err_result(qm); } static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h index 410c83712e28..30c2b1a64695 100644 --- a/drivers/crypto/hisilicon/sec2/sec.h +++ b/drivers/crypto/hisilicon/sec2/sec.h @@ -37,6 +37,7 @@ struct sec_aead_req { u8 *a_ivin; dma_addr_t a_ivin_dma; struct aead_request *aead_req; + bool fallback; }; /* SEC request of Crypto */ @@ -90,9 +91,7 @@ struct sec_auth_ctx { dma_addr_t a_key_dma; u8 *a_key; u8 a_key_len; - u8 mac_len; u8 a_alg; - bool fallback; struct crypto_shash *hash_tfm; struct crypto_aead *fallback_aead_tfm; }; diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c index 09a20307d01e..292ab0ff2b07 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.c +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c @@ -57,7 +57,6 @@ #define SEC_TYPE_MASK 0x0F #define SEC_DONE_MASK 0x0001 #define SEC_ICV_MASK 0x000E -#define SEC_SQE_LEN_RATE_MASK 0x3 #define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) #define SEC_SGL_SGE_NR 128 @@ -80,16 +79,16 @@ #define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ SEC_PBUF_LEFT_SZ(depth)) -#define SEC_SQE_LEN_RATE 4 #define SEC_SQE_CFLAG 2 #define SEC_SQE_AEAD_FLAG 3 #define SEC_SQE_DONE 0x1 #define SEC_ICV_ERR 0x2 -#define MIN_MAC_LEN 4 #define MAC_LEN_MASK 0x1U #define MAX_INPUT_DATA_LEN 0xFFFE00 #define BITS_MASK 0xFF +#define WORD_MASK 0x3 #define BYTE_BITS 0x8 +#define BYTES_TO_WORDS(bcount) ((bcount) >> 2) #define SEC_XTS_NAME_SZ 0x3 #define IV_CM_CAL_NUM 2 #define IV_CL_MASK 0x7 @@ -850,6 +849,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode); break; default: + dev_err(dev, "sec c_alg err!\n"); return -EINVAL; } @@ -953,15 +953,14 @@ static int sec_aead_mac_init(struct sec_aead_req *req) struct aead_request *aead_req = req->aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); size_t authsize = crypto_aead_authsize(tfm); - u8 *mac_out = req->out_mac; struct scatterlist *sgl = aead_req->src; + u8 *mac_out = req->out_mac; size_t copy_size; off_t skip_size; /* Copy input mac */ skip_size = aead_req->assoclen + aead_req->cryptlen - authsize; - copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, - authsize, skip_size); + copy_size = sg_pcopy_to_buffer(sgl, 
sg_nents(sgl), mac_out, authsize, skip_size); if (unlikely(copy_size != authsize)) return -EINVAL; @@ -1095,11 +1094,6 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, struct crypto_shash *hash_tfm = ctx->hash_tfm; int blocksize, digestsize, ret; - if (!keys->authkeylen) { - pr_err("hisi_sec2: aead auth key error!\n"); - return -EINVAL; - } - blocksize = crypto_shash_blocksize(hash_tfm); digestsize = crypto_shash_digestsize(hash_tfm); if (keys->authkeylen > blocksize) { @@ -1111,7 +1105,8 @@ static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, } ctx->a_key_len = digestsize; } else { - memcpy(ctx->a_key, keys->authkey, keys->authkeylen); + if (keys->authkeylen) + memcpy(ctx->a_key, keys->authkey, keys->authkeylen); ctx->a_key_len = keys->authkeylen; } @@ -1124,10 +1119,7 @@ static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize) struct sec_ctx *ctx = crypto_tfm_ctx(tfm); struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - if (unlikely(a_ctx->fallback_aead_tfm)) - return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize); - - return 0; + return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize); } static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx, @@ -1143,7 +1135,6 @@ static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx, static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, const u32 keylen, const enum sec_hash_alg a_alg, const enum sec_calg c_alg, - const enum sec_mac_len mac_len, const enum sec_cmode c_mode) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); @@ -1155,7 +1146,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, ctx->a_ctx.a_alg = a_alg; ctx->c_ctx.c_alg = c_alg; - ctx->a_ctx.mac_len = mac_len; c_ctx->c_mode = c_mode; if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) { @@ -1166,16 +1156,11 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, } memcpy(c_ctx->c_key, key, keylen); - if (unlikely(a_ctx->fallback_aead_tfm)) { - ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); - if (ret) - return ret; - } - - return 0; + return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); } - if (crypto_authenc_extractkeys(&keys, key, keylen)) + ret = crypto_authenc_extractkeys(&keys, key, keylen); + if (ret) goto bad_key; ret = sec_aead_aes_set_key(c_ctx, &keys); @@ -1190,9 +1175,15 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, goto bad_key; } - if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) || - (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) { - dev_err(dev, "MAC or AUTH key length error!\n"); + if (ctx->a_ctx.a_key_len & WORD_MASK) { + ret = -EINVAL; + dev_err(dev, "AUTH key length error!\n"); + goto bad_key; + } + + ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); + if (ret) { + dev_err(dev, "set sec fallback key err!\n"); goto bad_key; } @@ -1200,31 +1191,23 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, bad_key: memzero_explicit(&keys, sizeof(struct crypto_authenc_keys)); - return -EINVAL; + return ret; } -#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \ -static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \ - u32 keylen) \ -{ \ - return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\ +#define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \ +static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \ +{ \ + return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \ } -GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, 
SEC_A_HMAC_SHA1, - SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC) -GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, - SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC) -GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, - SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC) -GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, - SEC_HMAC_CCM_MAC, SEC_CMODE_CCM) -GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, - SEC_HMAC_GCM_MAC, SEC_CMODE_GCM) -GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, - SEC_HMAC_CCM_MAC, SEC_CMODE_CCM) -GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, - SEC_HMAC_GCM_MAC, SEC_CMODE_GCM) +GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC) +GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM) +GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM) +GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM) +GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM) static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req) { @@ -1473,9 +1456,10 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req) { struct aead_request *aead_req = req->aead_req.aead_req; - struct sec_cipher_req *c_req = &req->c_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); + size_t authsize = crypto_aead_authsize(tfm); struct sec_aead_req *a_req = &req->aead_req; - size_t authsize = ctx->a_ctx.mac_len; + struct sec_cipher_req *c_req = &req->c_req; u32 data_size = aead_req->cryptlen; u8 flage = 0; u8 cm, cl; @@ -1516,10 +1500,8 @@ static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req) static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req) { struct aead_request *aead_req = req->aead_req.aead_req; - struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); - size_t authsize = crypto_aead_authsize(tfm); - struct sec_cipher_req *c_req = &req->c_req; struct sec_aead_req *a_req = &req->aead_req; + struct sec_cipher_req *c_req = &req->c_req; memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize); @@ -1527,15 +1509,11 @@ static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req) /* * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter}, * the counter must set to 0x01 + * CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */ - ctx->a_ctx.mac_len = authsize; - /* CCM 16Byte Auth_IV: {1B_AFlage,13B_IV,2B_Ptext_length} */ set_aead_auth_iv(ctx, req); - } - - /* GCM 12Byte Cipher_IV == Auth_IV */ - if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) { - ctx->a_ctx.mac_len = authsize; + } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) { + /* GCM 12Byte Cipher_IV == Auth_IV */ memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE); } } @@ -1545,9 +1523,11 @@ static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir, { struct sec_aead_req *a_req = &req->aead_req; struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ - sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len); + sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize); /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr; @@ -1571,9 
+1551,11 @@ static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir, { struct sec_aead_req *a_req = &req->aead_req; struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ - sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3); + sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3); /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ sqe3->a_key_addr = sqe3->c_key_addr; @@ -1597,15 +1579,15 @@ static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, struct sec_aead_req *a_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); - sec_sqe->type2.mac_key_alg = - cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE); + sec_sqe->type2.mac_key_alg = cpu_to_le32(BYTES_TO_WORDS(authsize)); sec_sqe->type2.mac_key_alg |= - cpu_to_le32((u32)((ctx->a_key_len) / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET); sec_sqe->type2.mac_key_alg |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); @@ -1651,16 +1633,16 @@ static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, struct sec_aead_req *a_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; struct aead_request *aq = a_req->aead_req; + struct crypto_aead *tfm = crypto_aead_reqtfm(aq); + size_t authsize = crypto_aead_authsize(tfm); sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma); sqe3->auth_mac_key |= - cpu_to_le32((u32)(ctx->mac_len / - SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3); + cpu_to_le32(BYTES_TO_WORDS(authsize) << SEC_MAC_OFFSET_V3); sqe3->auth_mac_key |= - cpu_to_le32((u32)(ctx->a_key_len / - SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3); + cpu_to_le32((u32)BYTES_TO_WORDS(ctx->a_key_len) << SEC_AKEY_OFFSET_V3); sqe3->auth_mac_key |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); @@ -1706,9 +1688,9 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) { struct aead_request *a_req = req->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); + size_t authsize = crypto_aead_authsize(tfm); struct sec_aead_req *aead_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; - size_t authsize = crypto_aead_authsize(tfm); struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct aead_request *backlog_aead_req; struct sec_req *backlog_req; @@ -1721,10 +1703,8 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) if (!err && c_req->encrypt) { struct scatterlist *sgl = a_req->dst; - sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), - aead_req->out_mac, - authsize, a_req->cryptlen + - a_req->assoclen); + sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac, + authsize, a_req->cryptlen + a_req->assoclen); if (unlikely(sz != authsize)) { dev_err(c->dev, "copy out mac err!\n"); err = -EINVAL; @@ -1933,8 +1913,10 @@ static void sec_aead_exit(struct crypto_aead *tfm) static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) { + struct aead_alg *alg = crypto_aead_alg(tfm); struct sec_ctx *ctx = crypto_aead_ctx(tfm); - struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; + struct sec_auth_ctx *a_ctx = &ctx->a_ctx; + const char *aead_name = alg->base.cra_name; int ret; ret = sec_aead_init(tfm); 
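The next hunk allocates a software fallback AEAD for every transform. Passing CRYPTO_ALG_ASYNC in the mask with a type of 0 asks the crypto API for a synchronous implementation, so requests handed to the fallback complete inline. A sketch of allocating and using such a fallback; the function names are illustrative, not the driver's exact code.

#include <crypto/aead.h>

/* Ask for a synchronous software implementation: CRYPTO_ALG_ASYNC in the
 * mask (with type 0) excludes asynchronous algorithms. */
static struct crypto_aead *alloc_sync_fallback(const char *alg_name)
{
	return crypto_alloc_aead(alg_name, 0,
				 CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
}

static int fallback_encrypt(struct crypto_aead *fb_tfm, struct aead_request *req)
{
	struct aead_request *subreq = aead_request_alloc(fb_tfm, GFP_KERNEL);
	int ret;

	if (!subreq)
		return -ENOMEM;

	aead_request_set_callback(subreq, req->base.flags, NULL, NULL);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv);
	aead_request_set_ad(subreq, req->assoclen);
	ret = crypto_aead_encrypt(subreq);	/* completes inline for a sync tfm */
	aead_request_free(subreq);
	return ret;
}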
@@ -1943,11 +1925,20 @@ static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) return ret; } - auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); - if (IS_ERR(auth_ctx->hash_tfm)) { + a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); + if (IS_ERR(a_ctx->hash_tfm)) { dev_err(ctx->dev, "aead alloc shash error!\n"); sec_aead_exit(tfm); - return PTR_ERR(auth_ctx->hash_tfm); + return PTR_ERR(a_ctx->hash_tfm); + } + + a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0, + CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); + if (IS_ERR(a_ctx->fallback_aead_tfm)) { + dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n"); + crypto_free_shash(ctx->a_ctx.hash_tfm); + sec_aead_exit(tfm); + return PTR_ERR(a_ctx->fallback_aead_tfm); } return 0; @@ -1957,6 +1948,7 @@ static void sec_aead_ctx_exit(struct crypto_aead *tfm) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); + crypto_free_aead(ctx->a_ctx.fallback_aead_tfm); crypto_free_shash(ctx->a_ctx.hash_tfm); sec_aead_exit(tfm); } @@ -1983,7 +1975,6 @@ static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm) sec_aead_exit(tfm); return PTR_ERR(a_ctx->fallback_aead_tfm); } - a_ctx->fallback = false; return 0; } @@ -2264,21 +2255,20 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) { struct aead_request *req = sreq->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(req); - size_t authsize = crypto_aead_authsize(tfm); + size_t sz = crypto_aead_authsize(tfm); u8 c_mode = ctx->c_ctx.c_mode; struct device *dev = ctx->dev; int ret; - if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || - req->assoclen > SEC_MAX_AAD_LEN)) { - dev_err(dev, "aead input spec error!\n"); + /* Hardware does not handle cases where authsize is not 4 bytes aligned */ + if (c_mode == SEC_CMODE_CBC && (sz & WORD_MASK)) { + sreq->aead_req.fallback = true; return -EINVAL; } - if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) || - (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN || - authsize & MAC_LEN_MASK)))) { - dev_err(dev, "aead input mac length error!\n"); + if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || + req->assoclen > SEC_MAX_AAD_LEN)) { + dev_err(dev, "aead input spec error!\n"); return -EINVAL; } @@ -2297,7 +2287,7 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) if (sreq->c_req.encrypt) sreq->c_req.c_len = req->cryptlen; else - sreq->c_req.c_len = req->cryptlen - authsize; + sreq->c_req.c_len = req->cryptlen - sz; if (c_mode == SEC_CMODE_CBC) { if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { dev_err(dev, "aead crypto length error!\n"); @@ -2323,8 +2313,8 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) if (ctx->sec->qm.ver == QM_HW_V2) { if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && - req->cryptlen <= authsize))) { - ctx->a_ctx.fallback = true; + req->cryptlen <= authsize))) { + sreq->aead_req.fallback = true; return -EINVAL; } } @@ -2352,16 +2342,9 @@ static int sec_aead_soft_crypto(struct sec_ctx *ctx, bool encrypt) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; - struct device *dev = ctx->dev; struct aead_request *subreq; int ret; - /* Kunpeng920 aead mode not support input 0 size */ - if (!a_ctx->fallback_aead_tfm) { - dev_err(dev, "aead fallback tfm is NULL!\n"); - return -EINVAL; - } - subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL); if (!subreq) return -ENOMEM; @@ -2393,10 +2376,11 @@ static int sec_aead_crypto(struct aead_request *a_req, bool 
encrypt) req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; + req->aead_req.fallback = false; ret = sec_aead_param_check(ctx, req); if (unlikely(ret)) { - if (ctx->a_ctx.fallback) + if (req->aead_req.fallback) return sec_aead_soft_crypto(ctx, a_req, encrypt); return -EINVAL; } diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.h b/drivers/crypto/hisilicon/sec2/sec_crypto.h index d033f63b583f..db3fceb88e69 100644 --- a/drivers/crypto/hisilicon/sec2/sec_crypto.h +++ b/drivers/crypto/hisilicon/sec2/sec_crypto.h @@ -23,17 +23,6 @@ enum sec_hash_alg { SEC_A_HMAC_SHA512 = 0x15, }; -enum sec_mac_len { - SEC_HMAC_CCM_MAC = 16, - SEC_HMAC_GCM_MAC = 16, - SEC_SM3_MAC = 32, - SEC_HMAC_SM3_MAC = 32, - SEC_HMAC_MD5_MAC = 16, - SEC_HMAC_SHA1_MAC = 20, - SEC_HMAC_SHA256_MAC = 32, - SEC_HMAC_SHA512_MAC = 64, -}; - enum sec_cmode { SEC_CMODE_ECB = 0x0, SEC_CMODE_CBC = 0x1, diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c index d2ead648767b..8dd4c0b10a74 100644 --- a/drivers/crypto/hisilicon/sec2/sec_main.c +++ b/drivers/crypto/hisilicon/sec2/sec_main.c @@ -1008,11 +1008,15 @@ static u32 sec_get_hw_err_status(struct hisi_qm *qm) static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { - u32 nfe; - writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); - nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); - writel(nfe, qm->io_base + SEC_RAS_NFE_REG); +} + +static void sec_disable_error_report(struct hisi_qm *qm, u32 err_type) +{ + u32 nfe_mask; + + nfe_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); + writel(nfe_mask & (~err_type), qm->io_base + SEC_RAS_NFE_REG); } static void sec_open_axi_master_ooo(struct hisi_qm *qm) @@ -1024,6 +1028,27 @@ static void sec_open_axi_master_ooo(struct hisi_qm *qm) writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG); } +static enum acc_err_result sec_get_err_result(struct hisi_qm *qm) +{ + u32 err_status; + + err_status = sec_get_hw_err_status(qm); + if (err_status) { + if (err_status & qm->err_info.ecc_2bits_mask) + qm->err_status.is_dev_ecc_mbit = true; + sec_log_hw_error(qm, err_status); + + if (err_status & qm->err_info.dev_reset_mask) { + /* Disable the same error reporting until device is recovered. 
*/ + sec_disable_error_report(qm, err_status); + return ACC_ERR_NEED_RESET; + } + sec_clear_hw_err_status(qm, err_status); + } + + return ACC_ERR_RECOVERED; +} + static void sec_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; @@ -1050,12 +1075,12 @@ static const struct hisi_qm_err_ini sec_err_ini = { .hw_err_disable = sec_hw_error_disable, .get_dev_hw_err_status = sec_get_hw_err_status, .clear_dev_hw_err_status = sec_clear_hw_err_status, - .log_dev_hw_err = sec_log_hw_error, .open_axi_master_ooo = sec_open_axi_master_ooo, .open_sva_prefetch = sec_open_sva_prefetch, .close_sva_prefetch = sec_close_sva_prefetch, .show_last_dfx_regs = sec_show_last_dfx_regs, .err_info_init = sec_err_info_init, + .get_err_result = sec_get_err_result, }; static int sec_pf_probe_init(struct sec_dev *sec) diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c index a8d5d105b354..86e517812093 100644 --- a/drivers/crypto/hisilicon/zip/zip_main.c +++ b/drivers/crypto/hisilicon/zip/zip_main.c @@ -1069,11 +1069,15 @@ static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm) static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { - u32 nfe; - writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); - nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); - writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); +} + +static void hisi_zip_disable_error_report(struct hisi_qm *qm, u32 err_type) +{ + u32 nfe_mask; + + nfe_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); + writel(nfe_mask & (~err_type), qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); } static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm) @@ -1103,6 +1107,27 @@ static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm) qm->io_base + HZIP_CORE_INT_SET); } +static enum acc_err_result hisi_zip_get_err_result(struct hisi_qm *qm) +{ + u32 err_status; + + err_status = hisi_zip_get_hw_err_status(qm); + if (err_status) { + if (err_status & qm->err_info.ecc_2bits_mask) + qm->err_status.is_dev_ecc_mbit = true; + hisi_zip_log_hw_error(qm, err_status); + + if (err_status & qm->err_info.dev_reset_mask) { + /* Disable the same error reporting until device is recovered. 
*/ + hisi_zip_disable_error_report(qm, err_status); + return ACC_ERR_NEED_RESET; + } + hisi_zip_clear_hw_err_status(qm, err_status); + } + + return ACC_ERR_RECOVERED; +} + static void hisi_zip_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; @@ -1130,13 +1155,13 @@ static const struct hisi_qm_err_ini hisi_zip_err_ini = { .hw_err_disable = hisi_zip_hw_error_disable, .get_dev_hw_err_status = hisi_zip_get_hw_err_status, .clear_dev_hw_err_status = hisi_zip_clear_hw_err_status, - .log_dev_hw_err = hisi_zip_log_hw_error, .open_axi_master_ooo = hisi_zip_open_axi_master_ooo, .close_axi_master_ooo = hisi_zip_close_axi_master_ooo, .open_sva_prefetch = hisi_zip_open_sva_prefetch, .close_sva_prefetch = hisi_zip_close_sva_prefetch, .show_last_dfx_regs = hisi_zip_show_last_dfx_regs, .err_info_init = hisi_zip_err_info_init, + .get_err_result = hisi_zip_get_err_result, }; static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c index 103fc551d2af..ecf64cc35fff 100644 --- a/drivers/crypto/inside-secure/safexcel_hash.c +++ b/drivers/crypto/inside-secure/safexcel_hash.c @@ -2119,7 +2119,7 @@ static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm) safexcel_ahash_cra_init(tfm); ctx->aes = kmalloc(sizeof(*ctx->aes), GFP_KERNEL); - return PTR_ERR_OR_ZERO(ctx->aes); + return ctx->aes == NULL ? -ENOMEM : 0; } static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm) diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index d39a386b31ac..cfb149302e0a 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -469,6 +469,7 @@ static int init_ixp_crypto(struct device *dev) return -ENODEV; } npe_id = npe_spec.args[0]; + of_node_put(npe_spec.np); ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0, &queue_spec); @@ -477,6 +478,7 @@ static int init_ixp_crypto(struct device *dev) return -ENODEV; } recv_qid = queue_spec.args[0]; + of_node_put(queue_spec.np); ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0, &queue_spec); @@ -485,6 +487,7 @@ static int init_ixp_crypto(struct device *dev) return -ENODEV; } send_qid = queue_spec.args[0]; + of_node_put(queue_spec.np); } else { /* * Hardcoded engine when using platform data, this goes away diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c index 811ded72ce5f..798bb40fed68 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_reqmgr.c @@ -410,9 +410,10 @@ static int cpt_process_ccode(struct otx2_cptlfs_info *lfs, break; } - dev_err(&pdev->dev, - "Request failed with software error code 0x%x\n", - cpt_status->s.uc_compcode); + pr_debug("Request failed with software error code 0x%x: algo = %s driver = %s\n", + cpt_status->s.uc_compcode, + info->req->areq->tfm->__crt_alg->cra_name, + info->req->areq->tfm->__crt_alg->cra_driver_name); otx2_cpt_dump_sg_list(pdev, info->req); break; } diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c index 3ea334b7f820..1b0308ce7ce7 100644 --- a/drivers/crypto/nx/nx-common-pseries.c +++ b/drivers/crypto/nx/nx-common-pseries.c @@ -1142,6 +1142,7 @@ static void __init nxcop_get_capabilities(void) { struct hv_vas_all_caps *hv_caps; struct hv_nx_cop_caps *hv_nxc; + u64 feat; int rc; hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL); @@ -1152,27 +1153,26 @@ 
static void __init nxcop_get_capabilities(void) */ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0, (u64)virt_to_phys(hv_caps)); + if (!rc) + feat = be64_to_cpu(hv_caps->feat_type); + kfree(hv_caps); if (rc) - goto out; + return; + if (!(feat & VAS_NX_GZIP_FEAT_BIT)) + return; - caps_feat = be64_to_cpu(hv_caps->feat_type); /* * NX-GZIP feature available */ - if (caps_feat & VAS_NX_GZIP_FEAT_BIT) { - hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); - if (!hv_nxc) - goto out; - /* - * Get capabilities for NX-GZIP feature - */ - rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, - VAS_NX_GZIP_FEAT, - (u64)virt_to_phys(hv_nxc)); - } else { - pr_err("NX-GZIP feature is not available\n"); - rc = -EINVAL; - } + hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); + if (!hv_nxc) + return; + /* + * Get capabilities for NX-GZIP feature + */ + rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, + VAS_NX_GZIP_FEAT, + (u64)virt_to_phys(hv_nxc)); if (!rc) { nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor); @@ -1182,13 +1182,10 @@ static void __init nxcop_get_capabilities(void) be64_to_cpu(hv_nxc->min_compress_len); nx_cop_caps.min_decompress_len = be64_to_cpu(hv_nxc->min_decompress_len); - } else { - caps_feat = 0; + caps_feat = feat; } kfree(hv_nxc); -out: - kfree(hv_caps); } static const struct vio_device_id nx842_vio_driver_ids[] = { diff --git a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c index 64e4596a24f4..fd39cbcdec03 100644 --- a/drivers/crypto/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/qat/qat_common/adf_hw_arbiter.c @@ -90,10 +90,6 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev) hw_data->get_arb_info(&info); - /* Reset arbiter configuration */ - for (i = 0; i < ADF_ARB_NUM; i++) - WRITE_CSR_ARB_SARCONFIG(csr, arb_off, i, 0); - /* Unmap worker threads to service arbiters */ for (i = 0; i < hw_data->num_engines; i++) WRITE_CSR_ARB_WT2SAM(csr, arb_off, wt_off, i, 0); diff --git a/drivers/crypto/qce/aead.c b/drivers/crypto/qce/aead.c index 6eb4d2e35629..6fe41ae98252 100644 --- a/drivers/crypto/qce/aead.c +++ b/drivers/crypto/qce/aead.c @@ -786,7 +786,7 @@ static int qce_aead_register_one(const struct qce_aead_def *def, struct qce_devi alg->init = qce_aead_init; alg->exit = qce_aead_exit; - alg->base.cra_priority = 300; + alg->base.cra_priority = 275; alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c index d3780be44a76..df4ae3377b61 100644 --- a/drivers/crypto/qce/core.c +++ b/drivers/crypto/qce/core.c @@ -48,16 +48,19 @@ static void qce_unregister_algs(struct qce_device *qce) static int qce_register_algs(struct qce_device *qce) { const struct qce_algo_ops *ops; - int i, ret = -ENODEV; + int i, j, ret = -ENODEV; for (i = 0; i < ARRAY_SIZE(qce_ops); i++) { ops = qce_ops[i]; ret = ops->register_algs(qce); - if (ret) - break; + if (ret) { + for (j = i - 1; j >= 0; j--) + ops->unregister_algs(qce); + return ret; + } } - return ret; + return 0; } static int qce_handle_request(struct crypto_async_request *async_req) @@ -236,7 +239,7 @@ static int qce_crypto_probe(struct platform_device *pdev) ret = qce_check_version(qce); if (ret) - goto err_clks; + goto err_dma; spin_lock_init(&qce->lock); tasklet_init(&qce->done_tasklet, qce_tasklet_req_done, diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c index 37bafd7aeb79..f950f1bfc7ea 100644 --- a/drivers/crypto/qce/sha.c +++ 
b/drivers/crypto/qce/sha.c @@ -482,7 +482,7 @@ static int qce_ahash_register_one(const struct qce_ahash_def *def, base = &alg->halg.base; base->cra_blocksize = def->blocksize; - base->cra_priority = 300; + base->cra_priority = 175; base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; base->cra_ctxsize = sizeof(struct qce_sha_ctx); base->cra_alignmask = 0; diff --git a/drivers/crypto/qce/skcipher.c b/drivers/crypto/qce/skcipher.c index 5b493fdc1e74..ffb334eb5b34 100644 --- a/drivers/crypto/qce/skcipher.c +++ b/drivers/crypto/qce/skcipher.c @@ -461,7 +461,7 @@ static int qce_skcipher_register_one(const struct qce_skcipher_def *def, alg->encrypt = qce_skcipher_encrypt; alg->decrypt = qce_skcipher_decrypt; - alg->base.cra_priority = 300; + alg->base.cra_priority = 275; alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; diff --git a/drivers/cxl/core/pci.c b/drivers/cxl/core/pci.c index 8d92a24fd73d..97adf9a7ea89 100644 --- a/drivers/cxl/core/pci.c +++ b/drivers/cxl/core/pci.c @@ -377,7 +377,7 @@ static bool __cxl_hdm_decode_init(struct cxl_dev_state *cxlds, if (!allowed && info->mem_enabled) { dev_err(dev, "Range register decodes outside platform defined CXL ranges.\n"); - return -ENXIO; + return false; } /* diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 5b7d848a6f01..0e03c4908050 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -974,6 +974,7 @@ static int cxl_port_setup_targets(struct cxl_port *port, struct cxl_region_params *p = &cxlr->params; struct cxl_decoder *cxld = cxl_rr->decoder; struct cxl_switch_decoder *cxlsd; + struct cxl_port *iter = port; u16 eig, peig; u8 eiw, peiw; @@ -990,16 +991,26 @@ static int cxl_port_setup_targets(struct cxl_port *port, cxlsd = to_cxl_switch_decoder(&cxld->dev); if (cxl_rr->nr_targets_set) { - int i, distance; + int i, distance = 1; + struct cxl_region_ref *cxl_rr_iter; /* - * Passthrough decoders impose no distance requirements between - * peers + * The "distance" between peer downstream ports represents which + * endpoint positions in the region interleave a given port can + * host. + * + * For example, at the root of a hierarchy the distance is + * always 1 as every index targets a different host-bridge. At + * each subsequent switch level those ports map every Nth region + * position where N is the width of the switch == distance. */ - if (cxl_rr->nr_targets == 1) - distance = 0; - else - distance = p->nr_targets / cxl_rr->nr_targets; + do { + cxl_rr_iter = cxl_rr_load(iter, cxlr); + distance *= cxl_rr_iter->nr_targets; + iter = to_cxl_port(iter->dev.parent); + } while (!is_cxl_root(iter)); + distance *= cxlrd->cxlsd.cxld.interleave_ways; + for (i = 0; i < cxl_rr->nr_targets_set; i++) if (ep->dport == cxlsd->target[i]) { rc = check_last_peer(cxled, ep, cxl_rr, diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile deleted file mode 100644 index 191c31f0d4f0..000000000000 --- a/drivers/dax/pmem/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o -obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o - -dax_pmem-y := pmem.o -dax_pmem_core-y := core.o -dax_pmem_compat-y := compat.o diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c deleted file mode 100644 index dfe91a2990fe..000000000000 --- a/drivers/dax/pmem/pmem.c +++ /dev/null @@ -1,10 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. 
*/ -#include -#include -#include -#include -#include -#include "../bus.h" - - diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c index 5c8a7084577b..69cb272f65e6 100644 --- a/drivers/dma-buf/dma-fence-array.c +++ b/drivers/dma-buf/dma-fence-array.c @@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence) static bool dma_fence_array_signaled(struct dma_fence *fence) { struct dma_fence_array *array = to_dma_fence_array(fence); + int num_pending; + unsigned int i; - if (atomic_read(&array->num_pending) > 0) + /* + * We need to read num_pending before checking the enable_signal bit + * to avoid racing with the enable_signaling() implementation, which + * might decrement the counter, and cause a partial check. + * atomic_read_acquire() pairs with atomic_dec_and_test() in + * dma_fence_array_enable_signaling() + * + * The !--num_pending check is here to account for the any_signaled case + * if we race with enable_signaling(), that means the !num_pending check + * in the is_signalling_enabled branch might be outdated (num_pending + * might have been decremented), but that's fine. The user will get the + * right value when testing again later. + */ + num_pending = atomic_read_acquire(&array->num_pending); + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) { + if (num_pending <= 0) + goto signal; return false; + } + for (i = 0; i < array->num_fences; ++i) { + if (dma_fence_is_signaled(array->fences[i]) && !--num_pending) + goto signal; + } + return false; + +signal: dma_fence_array_clear_pending_error(array); return true; } diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c index 628af51c81af..6345062731f1 100644 --- a/drivers/dma-buf/dma-fence-unwrap.c +++ b/drivers/dma-buf/dma-fence-unwrap.c @@ -12,6 +12,7 @@ #include #include #include +#include /* Internal helper to start new array iteration, don't use directly */ static struct dma_fence * @@ -59,6 +60,25 @@ struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor) } EXPORT_SYMBOL_GPL(dma_fence_unwrap_next); + +static int fence_cmp(const void *_a, const void *_b) +{ + struct dma_fence *a = *(struct dma_fence **)_a; + struct dma_fence *b = *(struct dma_fence **)_b; + + if (a->context < b->context) + return -1; + else if (a->context > b->context) + return 1; + + if (dma_fence_is_later(b, a)) + return 1; + else if (dma_fence_is_later(a, b)) + return -1; + + return 0; +} + /* Implementation for the dma_fence_merge() marco, don't use directly */ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, struct dma_fence **fences, @@ -67,8 +87,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, struct dma_fence_array *result; struct dma_fence *tmp, **array; ktime_t timestamp; - unsigned int i; - size_t count; + int i, j, count; count = 0; timestamp = ns_to_ktime(0); @@ -96,78 +115,55 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, if (!array) return NULL; - /* - * This trashes the input fence array and uses it as position for the - * following merge loop. This works because the dma_fence_merge() - * wrapper macro is creating this temporary array on the stack together - * with the iterators. 
- */ - for (i = 0; i < num_fences; ++i) - fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]); - count = 0; - do { - unsigned int sel; - -restart: - tmp = NULL; - for (i = 0; i < num_fences; ++i) { - struct dma_fence *next; - - while (fences[i] && dma_fence_is_signaled(fences[i])) - fences[i] = dma_fence_unwrap_next(&iter[i]); - - next = fences[i]; - if (!next) - continue; - - /* - * We can't guarantee that inpute fences are ordered by - * context, but it is still quite likely when this - * function is used multiple times. So attempt to order - * the fences by context as we pass over them and merge - * fences with the same context. - */ - if (!tmp || tmp->context > next->context) { - tmp = next; - sel = i; - - } else if (tmp->context < next->context) { - continue; - - } else if (dma_fence_is_later(tmp, next)) { - fences[i] = dma_fence_unwrap_next(&iter[i]); - goto restart; + for (i = 0; i < num_fences; ++i) { + dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { + if (!dma_fence_is_signaled(tmp)) { + array[count++] = dma_fence_get(tmp); } else { - fences[sel] = dma_fence_unwrap_next(&iter[sel]); - goto restart; + ktime_t t = dma_fence_timestamp(tmp); + + if (ktime_after(t, timestamp)) + timestamp = t; } } + } - if (tmp) { - array[count++] = dma_fence_get(tmp); - fences[sel] = dma_fence_unwrap_next(&iter[sel]); + if (count == 0 || count == 1) + goto return_fastpath; + + sort(array, count, sizeof(*array), fence_cmp, NULL); + + /* + * Only keep the most recent fence for each context. + */ + j = 0; + for (i = 1; i < count; i++) { + if (array[i]->context == array[j]->context) + dma_fence_put(array[i]); + else + array[++j] = array[i]; + } + count = ++j; + + if (count > 1) { + result = dma_fence_array_create(count, array, + dma_fence_context_alloc(1), + 1, false); + if (!result) { + for (i = 0; i < count; i++) + dma_fence_put(array[i]); + tmp = NULL; + goto return_tmp; } - } while (tmp); - - if (count == 0) { - tmp = dma_fence_allocate_private_stub(ktime_get()); - goto return_tmp; + return &result->base; } - if (count == 1) { +return_fastpath: + if (count == 0) + tmp = dma_fence_allocate_private_stub(timestamp); + else tmp = array[0]; - goto return_tmp; - } - - result = dma_fence_array_create(count, array, - dma_fence_context_alloc(1), - 1, false); - if (!result) { - tmp = NULL; - goto return_tmp; - } - return &result->base; return_tmp: kfree(array); diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index e78ff9333c7a..759b4f80baee 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -308,8 +308,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence, count++; dma_resv_list_set(fobj, i, fence, usage); - /* pointer update must be visible before we extend the num_fences */ - smp_store_mb(fobj->num_fences, count); + /* fence update must be visible before we extend the num_fences */ + smp_wmb(); + fobj->num_fences = count; } EXPORT_SYMBOL(dma_resv_add_fence); diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 2bcdb935a3ac..546bba502fbc 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -164,7 +164,7 @@ static const struct dma_buf_ops udmabuf_ops = { }; #define SEALS_WANTED (F_SEAL_SHRINK) -#define SEALS_DENIED (F_SEAL_WRITE) +#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE) static long udmabuf_create(struct miscdevice *device, struct udmabuf_create_list *head, @@ -186,7 +186,7 @@ static long udmabuf_create(struct miscdevice *device, if (!ubuf) return -ENOMEM; - pglimit = 
(size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; + pglimit = ((u64)size_limit_mb * 1024 * 1024) >> PAGE_SHIFT; for (i = 0; i < head->count; i++) { if (!IS_ALIGNED(list[i].offset, PAGE_SIZE)) goto err; diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c index cac4532fe23a..1e6d13278c5a 100644 --- a/drivers/dma/apple-admac.c +++ b/drivers/dma/apple-admac.c @@ -152,6 +152,8 @@ static int admac_alloc_sram_carveout(struct admac_data *ad, { struct admac_sram *sram; int i, ret = 0, nblocks; + ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE); + ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE); if (dir == DMA_MEM_TO_DEV) sram = &ad->txcache; @@ -911,12 +913,7 @@ static int admac_probe(struct platform_device *pdev) goto free_irq; } - ad->txcache.size = readl_relaxed(ad->base + REG_TX_SRAM_SIZE); - ad->rxcache.size = readl_relaxed(ad->base + REG_RX_SRAM_SIZE); - dev_info(&pdev->dev, "Audio DMA Controller\n"); - dev_info(&pdev->dev, "imprint %x TX cache %u RX cache %u\n", - readl_relaxed(ad->base + REG_IMPRINT), ad->txcache.size, ad->rxcache.size); return 0; diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 7919906b02e7..c457aaf15231 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1287,6 +1287,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, return NULL; desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value); + if (!desc) + return NULL; list_add_tail(&desc->desc_node, &desc->descs_list); desc->tx_dma_desc.cookie = -EBUSY; diff --git a/drivers/dma/dw/acpi.c b/drivers/dma/dw/acpi.c index c510c109d2c3..b6452fffa657 100644 --- a/drivers/dma/dw/acpi.c +++ b/drivers/dma/dw/acpi.c @@ -8,13 +8,15 @@ static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) { + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_dma_chip_pdata *data = dev_get_drvdata(dw->dma.dev); struct acpi_dma_spec *dma_spec = param; struct dw_dma_slave slave = { .dma_dev = dma_spec->dev, .src_id = dma_spec->slave_id, .dst_id = dma_spec->slave_id, - .m_master = 0, - .p_master = 1, + .m_master = data->m_master, + .p_master = data->p_master, }; return dw_dma_filter(chan, &slave); diff --git a/drivers/dma/dw/internal.h b/drivers/dma/dw/internal.h index 563ce73488db..f1bd06a20cd6 100644 --- a/drivers/dma/dw/internal.h +++ b/drivers/dma/dw/internal.h @@ -51,11 +51,15 @@ struct dw_dma_chip_pdata { int (*probe)(struct dw_dma_chip *chip); int (*remove)(struct dw_dma_chip *chip); struct dw_dma_chip *chip; + u8 m_master; + u8 p_master; }; static __maybe_unused const struct dw_dma_chip_pdata dw_dma_chip_pdata = { .probe = dw_dma_probe, .remove = dw_dma_remove, + .m_master = 0, + .p_master = 1, }; static const struct dw_dma_platform_data idma32_pdata = { @@ -72,6 +76,8 @@ static __maybe_unused const struct dw_dma_chip_pdata idma32_chip_pdata = { .pdata = &idma32_pdata, .probe = idma32_dma_probe, .remove = idma32_dma_remove, + .m_master = 0, + .p_master = 0, }; static const struct dw_dma_platform_data xbar_pdata = { @@ -88,6 +94,8 @@ static __maybe_unused const struct dw_dma_chip_pdata xbar_chip_pdata = { .pdata = &xbar_pdata, .probe = idma32_dma_probe, .remove = idma32_dma_remove, + .m_master = 0, + .p_master = 0, }; #endif /* _DMA_DW_INTERNAL_H */ diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index ad2d4d012cf7..e8a0eb81726a 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -56,10 +56,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) if (ret) return ret; - 
dw_dma_acpi_controller_register(chip->dw); - pci_set_drvdata(pdev, data); + dw_dma_acpi_controller_register(chip->dw); + return 0; } diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index 9f8adb7013eb..9b07474f450b 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -12,7 +12,9 @@ #include #include #include +#include #include +#include #include "registers.h" #include "idxd.h" @@ -35,6 +37,7 @@ struct idxd_user_context { struct idxd_wq *wq; struct task_struct *task; unsigned int pasid; + struct mm_struct *mm; unsigned int flags; struct iommu_sva *sva; }; @@ -69,6 +72,19 @@ static inline struct idxd_wq *inode_wq(struct inode *inode) return idxd_cdev->wq; } +static void idxd_xa_pasid_remove(struct idxd_user_context *ctx) +{ + struct idxd_wq *wq = ctx->wq; + void *ptr; + + mutex_lock(&wq->uc_lock); + ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL); + if (ptr != (void *)ctx) + dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n", + ctx->pasid); + mutex_unlock(&wq->uc_lock); +} + static int idxd_cdev_open(struct inode *inode, struct file *filp) { struct idxd_user_context *ctx; @@ -109,20 +125,25 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) pasid = iommu_sva_get_pasid(sva); if (pasid == IOMMU_PASID_INVALID) { - iommu_sva_unbind_device(sva); rc = -EINVAL; - goto failed; + goto failed_get_pasid; } ctx->sva = sva; ctx->pasid = pasid; + ctx->mm = current->mm; + + mutex_lock(&wq->uc_lock); + rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL); + mutex_unlock(&wq->uc_lock); + if (rc < 0) + dev_warn(dev, "PASID entry already exist in xarray.\n"); if (wq_dedicated(wq)) { rc = idxd_wq_set_pasid(wq, pasid); if (rc < 0) { - iommu_sva_unbind_device(sva); dev_err(dev, "wq set pasid failed: %d\n", rc); - goto failed; + goto failed_set_pasid; } } } @@ -131,7 +152,13 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp) mutex_unlock(&wq->wq_lock); return 0; - failed: +failed_set_pasid: + if (device_user_pasid_enabled(idxd)) + idxd_xa_pasid_remove(ctx); +failed_get_pasid: + if (device_user_pasid_enabled(idxd)) + iommu_sva_unbind_device(sva); +failed: mutex_unlock(&wq->wq_lock); kfree(ctx); return rc; @@ -162,8 +189,10 @@ static int idxd_cdev_release(struct inode *node, struct file *filep) } } - if (ctx->sva) + if (ctx->sva) { iommu_sva_unbind_device(ctx->sva); + idxd_xa_pasid_remove(ctx); + } kfree(ctx); mutex_lock(&wq->wq_lock); idxd_wq_put(wq); @@ -210,6 +239,9 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO)) return -EPERM; + if (current->mm != ctx->mm) + return -EPERM; + rc = check_vma(wq, vma, __func__); if (rc < 0) return rc; @@ -276,6 +308,9 @@ static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t ssize_t written = 0; int i; + if (current->mm != ctx->mm) + return -EPERM; + for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) { int rc = idxd_submit_user_descriptor(ctx, udesc + i); @@ -296,6 +331,9 @@ static __poll_t idxd_cdev_poll(struct file *filp, struct idxd_device *idxd = wq->idxd; __poll_t out = 0; + if (current->mm != ctx->mm) + return POLLNVAL; + poll_wait(filp, &wq->err_queue, wait); spin_lock(&idxd->dev_lock); if (idxd->sw_err.valid) @@ -408,6 +446,13 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) } mutex_lock(&wq->wq_lock); + + wq->wq = create_workqueue(dev_name(wq_confdev(wq))); + if (!wq->wq) { + rc = -ENOMEM; + goto wq_err; + } + wq->type = IDXD_WQT_USER; rc = 
drv_enable_wq(wq); if (rc < 0) @@ -426,7 +471,9 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) err_cdev: drv_disable_wq(wq); err: + destroy_workqueue(wq->wq); wq->type = IDXD_WQT_NONE; +wq_err: mutex_unlock(&wq->wq_lock); return rc; } @@ -439,6 +486,8 @@ static void idxd_user_drv_remove(struct idxd_dev *idxd_dev) idxd_wq_del_cdev(wq); drv_disable_wq(wq); wq->type = IDXD_WQT_NONE; + destroy_workqueue(wq->wq); + wq->wq = NULL; mutex_unlock(&wq->wq_lock); } @@ -485,3 +534,70 @@ void idxd_cdev_remove(void) ida_destroy(&ictx[i].minor_ida); } } + +/** + * idxd_copy_cr - copy completion record to user address space found by wq and + * PASID + * @wq: work queue + * @pasid: PASID + * @addr: user fault address to write + * @cr: completion record + * @len: number of bytes to copy + * + * This is called by a work that handles completion record fault. + * + * Return: number of bytes copied. + */ +int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr, + void *cr, int len) +{ + struct device *dev = &wq->idxd->pdev->dev; + int left = len, status_size = 1; + struct idxd_user_context *ctx; + struct mm_struct *mm; + + mutex_lock(&wq->uc_lock); + + ctx = xa_load(&wq->upasid_xa, pasid); + if (!ctx) { + dev_warn(dev, "No user context\n"); + goto out; + } + + mm = ctx->mm; + /* + * The completion record fault handling work is running in kernel + * thread context. It temporarily switches to the mm to copy cr + * to addr in the mm. + */ + kthread_use_mm(mm); + left = copy_to_user((void __user *)addr + status_size, cr + status_size, + len - status_size); + /* + * Copy status only after the rest of completion record is copied + * successfully so that the user gets the complete completion record + * when a non-zero status is polled. + */ + if (!left) { + u8 status; + + /* + * Ensure that the completion record's status field is written + * after the rest of the completion record has been written. + * This ensures that the user receives the correct completion + * record information once polling for a non-zero status. + */ + wmb(); + status = *(u8 *)cr; + if (put_user(status, (u8 __user *)addr)) + left += status_size; + } else { + left += status_size; + } + kthread_unuse_mm(mm); + +out: + mutex_unlock(&wq->uc_lock); + + return len - left; +} diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 14c6ef987fed..c3ace4aed0fc 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -185,6 +185,7 @@ struct idxd_wq { struct idxd_dev idxd_dev; struct idxd_cdev *idxd_cdev; struct wait_queue_head err_queue; + struct workqueue_struct *wq; struct idxd_device *idxd; int id; struct idxd_irq_entry ie; @@ -214,6 +215,10 @@ struct idxd_wq { char name[WQ_NAME_SIZE + 1]; u64 max_xfer_bytes; u32 max_batch_size; + + /* Lock to protect upasid_xa access. 
*/ + struct mutex uc_lock; + struct xarray upasid_xa; }; struct idxd_engine { @@ -665,6 +670,8 @@ void idxd_cdev_remove(void); int idxd_cdev_get_major(struct idxd_device *idxd); int idxd_wq_add_cdev(struct idxd_wq *wq); void idxd_wq_del_cdev(struct idxd_wq *wq); +int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr, + void *buf, int len); /* perfmon */ #if IS_ENABLED(CONFIG_INTEL_IDXD_PERFMON) diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 30193195c813..ea651d5cf332 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -141,6 +141,25 @@ static void idxd_cleanup_interrupts(struct idxd_device *idxd) pci_free_irq_vectors(pdev); } +static void idxd_clean_wqs(struct idxd_device *idxd) +{ + struct idxd_wq *wq; + struct device *conf_dev; + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + wq = idxd->wqs[i]; + if (idxd->hw.wq_cap.op_config) + bitmap_free(wq->opcap_bmap); + kfree(wq->wqcfg); + conf_dev = wq_confdev(wq); + put_device(conf_dev); + kfree(wq); + } + bitmap_free(idxd->wq_enable_map); + kfree(idxd->wqs); +} + static int idxd_setup_wqs(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; @@ -155,8 +174,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd) idxd->wq_enable_map = bitmap_zalloc_node(idxd->max_wqs, GFP_KERNEL, dev_to_node(dev)); if (!idxd->wq_enable_map) { - kfree(idxd->wqs); - return -ENOMEM; + rc = -ENOMEM; + goto err_bitmap; } for (i = 0; i < idxd->max_wqs; i++) { @@ -175,10 +194,8 @@ static int idxd_setup_wqs(struct idxd_device *idxd) conf_dev->bus = &dsa_bus_type; conf_dev->type = &idxd_wq_device_type; rc = dev_set_name(conf_dev, "wq%d.%d", idxd->id, wq->id); - if (rc < 0) { - put_device(conf_dev); + if (rc < 0) goto err; - } mutex_init(&wq->wq_lock); init_waitqueue_head(&wq->err_queue); @@ -189,7 +206,6 @@ static int idxd_setup_wqs(struct idxd_device *idxd) wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); if (!wq->wqcfg) { - put_device(conf_dev); rc = -ENOMEM; goto err; } @@ -197,26 +213,58 @@ static int idxd_setup_wqs(struct idxd_device *idxd) if (idxd->hw.wq_cap.op_config) { wq->opcap_bmap = bitmap_zalloc(IDXD_MAX_OPCAP_BITS, GFP_KERNEL); if (!wq->opcap_bmap) { - put_device(conf_dev); rc = -ENOMEM; - goto err; + goto err_opcap_bmap; } bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); } + mutex_init(&wq->uc_lock); + xa_init(&wq->upasid_xa); idxd->wqs[i] = wq; } return 0; - err: +err_opcap_bmap: + kfree(wq->wqcfg); + +err: + put_device(conf_dev); + kfree(wq); + while (--i >= 0) { wq = idxd->wqs[i]; + if (idxd->hw.wq_cap.op_config) + bitmap_free(wq->opcap_bmap); + kfree(wq->wqcfg); conf_dev = wq_confdev(wq); put_device(conf_dev); + kfree(wq); + } + bitmap_free(idxd->wq_enable_map); + +err_bitmap: + kfree(idxd->wqs); + return rc; } +static void idxd_clean_engines(struct idxd_device *idxd) +{ + struct idxd_engine *engine; + struct device *conf_dev; + int i; + + for (i = 0; i < idxd->max_engines; i++) { + engine = idxd->engines[i]; + conf_dev = engine_confdev(engine); + put_device(conf_dev); + kfree(engine); + } + kfree(idxd->engines); +} + static int idxd_setup_engines(struct idxd_device *idxd) { struct idxd_engine *engine; @@ -247,6 +295,7 @@ static int idxd_setup_engines(struct idxd_device *idxd) rc = dev_set_name(conf_dev, "engine%d.%d", idxd->id, engine->id); if (rc < 0) { put_device(conf_dev); + kfree(engine); goto err; } @@ -260,10 +309,26 @@ static int idxd_setup_engines(struct idxd_device *idxd) engine = 
idxd->engines[i]; conf_dev = engine_confdev(engine); put_device(conf_dev); + kfree(engine); } + kfree(idxd->engines); + return rc; } +static void idxd_clean_groups(struct idxd_device *idxd) +{ + struct idxd_group *group; + int i; + + for (i = 0; i < idxd->max_groups; i++) { + group = idxd->groups[i]; + put_device(group_confdev(group)); + kfree(group); + } + kfree(idxd->groups); +} + static int idxd_setup_groups(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; @@ -294,6 +359,7 @@ static int idxd_setup_groups(struct idxd_device *idxd) rc = dev_set_name(conf_dev, "group%d.%d", idxd->id, group->id); if (rc < 0) { put_device(conf_dev); + kfree(group); goto err; } @@ -313,27 +379,25 @@ static int idxd_setup_groups(struct idxd_device *idxd) while (--i >= 0) { group = idxd->groups[i]; put_device(group_confdev(group)); + kfree(group); } + kfree(idxd->groups); + return rc; } static void idxd_cleanup_internals(struct idxd_device *idxd) { - int i; - - for (i = 0; i < idxd->max_groups; i++) - put_device(group_confdev(idxd->groups[i])); - for (i = 0; i < idxd->max_engines; i++) - put_device(engine_confdev(idxd->engines[i])); - for (i = 0; i < idxd->max_wqs; i++) - put_device(wq_confdev(idxd->wqs[i])); + idxd_clean_groups(idxd); + idxd_clean_engines(idxd); + idxd_clean_wqs(idxd); destroy_workqueue(idxd->wq); } static int idxd_setup_internals(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; - int rc, i; + int rc; init_waitqueue_head(&idxd->cmd_waitq); @@ -358,14 +422,11 @@ static int idxd_setup_internals(struct idxd_device *idxd) return 0; err_wkq_create: - for (i = 0; i < idxd->max_groups; i++) - put_device(group_confdev(idxd->groups[i])); + idxd_clean_groups(idxd); err_group: - for (i = 0; i < idxd->max_engines; i++) - put_device(engine_confdev(idxd->engines[i])); + idxd_clean_engines(idxd); err_engine: - for (i = 0; i < idxd->max_wqs; i++) - put_device(wq_confdev(idxd->wqs[i])); + idxd_clean_wqs(idxd); err_wqs: return rc; } @@ -461,6 +522,17 @@ static void idxd_read_caps(struct idxd_device *idxd) multi_u64_to_bmap(idxd->opcap_bmap, &idxd->hw.opcap.bits[0], 4); } +static void idxd_free(struct idxd_device *idxd) +{ + if (!idxd) + return; + + put_device(idxd_confdev(idxd)); + bitmap_free(idxd->opcap_bmap); + ida_free(&idxd_ida, idxd->id); + kfree(idxd); +} + static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_data *data) { struct device *dev = &pdev->dev; @@ -478,28 +550,34 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev, struct idxd_driver_d idxd_dev_set_type(&idxd->idxd_dev, idxd->data->type); idxd->id = ida_alloc(&idxd_ida, GFP_KERNEL); if (idxd->id < 0) - return NULL; + goto err_ida; idxd->opcap_bmap = bitmap_zalloc_node(IDXD_MAX_OPCAP_BITS, GFP_KERNEL, dev_to_node(dev)); - if (!idxd->opcap_bmap) { - ida_free(&idxd_ida, idxd->id); - return NULL; - } + if (!idxd->opcap_bmap) + goto err_opcap; device_initialize(conf_dev); conf_dev->parent = dev; conf_dev->bus = &dsa_bus_type; conf_dev->type = idxd->data->dev_type; rc = dev_set_name(conf_dev, "%s%d", idxd->data->name_prefix, idxd->id); - if (rc < 0) { - put_device(conf_dev); - return NULL; - } + if (rc < 0) + goto err_name; spin_lock_init(&idxd->dev_lock); spin_lock_init(&idxd->cmd_lock); return idxd; + +err_name: + put_device(conf_dev); + bitmap_free(idxd->opcap_bmap); +err_opcap: + ida_free(&idxd_ida, idxd->id); +err_ida: + kfree(idxd); + + return NULL; } static int idxd_enable_system_pasid(struct idxd_device *idxd) @@ -674,7 +752,7 @@ static int idxd_pci_probe(struct pci_dev 
*pdev, const struct pci_device_id *id) err: pci_iounmap(pdev, idxd->reg_base); err_iomap: - put_device(idxd_confdev(idxd)); + idxd_free(idxd); err_idxd_alloc: pci_disable_device(pdev); return rc; @@ -737,6 +815,7 @@ static void idxd_remove(struct pci_dev *pdev) destroy_workqueue(idxd->wq); perfmon_pmu_remove(idxd); put_device(idxd_confdev(idxd)); + idxd_free(idxd); } static struct pci_driver idxd_pci_driver = { diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index c811757d0f97..0689464c4816 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -1315,6 +1315,7 @@ static void idxd_conf_wq_release(struct device *dev) bitmap_free(wq->opcap_bmap); kfree(wq->wqcfg); + xa_destroy(&wq->upasid_xa); kfree(wq); } diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 23b232b57518..ea48661e87ea 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -1393,6 +1393,7 @@ static int mv_xor_probe(struct platform_device *pdev) irq = irq_of_parse_and_map(np, 0); if (!irq) { ret = -ENODEV; + of_node_put(np); goto err_channel_add; } @@ -1401,6 +1402,7 @@ static int mv_xor_probe(struct platform_device *pdev) if (IS_ERR(chan)) { ret = PTR_ERR(chan); irq_dispose_mapping(irq); + of_node_put(np); goto err_channel_add; } diff --git a/drivers/dma/tegra186-gpc-dma.c b/drivers/dma/tegra186-gpc-dma.c index e70b7c41dcab..7433d0da34f1 100644 --- a/drivers/dma/tegra186-gpc-dma.c +++ b/drivers/dma/tegra186-gpc-dma.c @@ -228,6 +228,7 @@ struct tegra_dma_channel { bool config_init; char name[30]; enum dma_transfer_direction sid_dir; + enum dma_status status; int id; int irq; int slave_id; @@ -389,6 +390,8 @@ static int tegra_dma_pause(struct tegra_dma_channel *tdc) tegra_dma_dump_chan_regs(tdc); } + tdc->status = DMA_PAUSED; + return ret; } @@ -415,6 +418,8 @@ static void tegra_dma_resume(struct tegra_dma_channel *tdc) val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE); val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE; tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val); + + tdc->status = DMA_IN_PROGRESS; } static int tegra_dma_device_resume(struct dma_chan *dc) @@ -540,6 +545,7 @@ static void tegra_dma_xfer_complete(struct tegra_dma_channel *tdc) tegra_dma_sid_free(tdc); tdc->dma_desc = NULL; + tdc->status = DMA_COMPLETE; } static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc, @@ -712,6 +718,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc) tdc->dma_desc = NULL; } + tdc->status = DMA_COMPLETE; tegra_dma_sid_free(tdc); vchan_get_all_descriptors(&tdc->vc, &head); spin_unlock_irqrestore(&tdc->vc.lock, flags); @@ -765,6 +772,9 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, if (ret == DMA_COMPLETE) return ret; + if (tdc->status == DMA_PAUSED) + ret = DMA_PAUSED; + spin_lock_irqsave(&tdc->vc.lock, flags); vd = vchan_find_desc(&tdc->vc, cookie); if (vd) { diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index 9212ac9f978f..89e06c87a258 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -209,7 +209,6 @@ struct edma_desc { struct edma_cc; struct edma_tc { - struct device_node *node; u16 id; }; @@ -2475,13 +2474,13 @@ static int edma_probe(struct platform_device *pdev) if (ret || i == ecc->num_tc) break; - ecc->tc_list[i].node = tc_args.np; ecc->tc_list[i].id = i; queue_priority_mapping[i][1] = tc_args.args[0]; if (queue_priority_mapping[i][1] > lowest_priority) { lowest_priority = queue_priority_mapping[i][1]; info->default_queue = i; } + of_node_put(tc_args.np); } /* See if we have optional dma-channel-mask array */ diff --git 
a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index e323e1a5f20f..edad538928dd 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -1088,8 +1088,11 @@ static void udma_check_tx_completion(struct work_struct *work) u32 residue_diff; ktime_t time_diff; unsigned long delay; + unsigned long flags; while (1) { + spin_lock_irqsave(&uc->vc.lock, flags); + if (uc->desc) { /* Get previous residue and time stamp */ residue_diff = uc->tx_drain.residue; @@ -1124,6 +1127,8 @@ static void udma_check_tx_completion(struct work_struct *work) break; } + spin_unlock_irqrestore(&uc->vc.lock, flags); + usleep_range(ktime_to_us(delay), ktime_to_us(delay) + 10); continue; @@ -1140,6 +1145,8 @@ static void udma_check_tx_completion(struct work_struct *work) break; } + + spin_unlock_irqrestore(&uc->vc.lock, flags); } static irqreturn_t udma_ring_irq_handler(int irq, void *data) @@ -4209,7 +4216,6 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { struct udma_dev *ud = ofdma->of_dma_data; - dma_cap_mask_t mask = ud->ddev.cap_mask; struct udma_filter_param filter_param; struct dma_chan *chan; @@ -4241,7 +4247,7 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, } } - chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param, + chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param, ofdma->of_node); if (!chan) { dev_err(ud->dev, "get channel fail in %s.\n", __func__); diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index e7e8e624a436..f865139e43db 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -98,7 +98,7 @@ static irqreturn_t altr_sdram_mc_err_handler(int irq, void *dev_id) if (status & priv->ecc_stat_ce_mask) { regmap_read(drvdata->mc_vbase, priv->ecc_saddr_offset, &err_addr); - if (priv->ecc_uecnt_offset) + if (priv->ecc_cecnt_offset) regmap_read(drvdata->mc_vbase, priv->ecc_cecnt_offset, &err_count); edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, err_count, @@ -1015,9 +1015,6 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask, } } - /* Interrupt mode set to every SBERR */ - regmap_write(ecc_mgr_map, ALTR_A10_ECC_INTMODE_OFST, - ALTR_A10_ECC_INTMODE); /* Enable ECC */ ecc_set_bits(ecc_ctrl_en_mask, (ecc_block_base + ALTR_A10_ECC_CTRL_OFST)); @@ -2138,6 +2135,10 @@ static int altr_edac_a10_probe(struct platform_device *pdev) return PTR_ERR(edac->ecc_mgr_map); } + /* Set irq mask for DDR SBE to avoid any pending irq before registration */ + regmap_write(edac->ecc_mgr_map, A10_SYSMGR_ECC_INTMASK_SET_OFST, + (A10_SYSMGR_ECC_INTMASK_SDMMCB | A10_SYSMGR_ECC_INTMASK_DDR0)); + edac->irq_chip.name = pdev->dev.of_node->name; edac->irq_chip.irq_mask = a10_eccmgr_irq_mask; edac->irq_chip.irq_unmask = a10_eccmgr_irq_unmask; diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h index 3727e72c8c2e..7248d24c4908 100644 --- a/drivers/edac/altera_edac.h +++ b/drivers/edac/altera_edac.h @@ -249,6 +249,8 @@ struct altr_sdram_mc_data { #define A10_SYSMGR_ECC_INTMASK_SET_OFST 0x94 #define A10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98 #define A10_SYSMGR_ECC_INTMASK_OCRAM BIT(1) +#define A10_SYSMGR_ECC_INTMASK_SDMMCB BIT(16) +#define A10_SYSMGR_ECC_INTMASK_DDR0 BIT(17) #define A10_SYSMGR_ECC_INTSTAT_SERR_OFST 0x9C #define A10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0 diff --git a/drivers/edac/bluefield_edac.c b/drivers/edac/bluefield_edac.c index e4736eb37bfb..0ef048982768 100644 --- a/drivers/edac/bluefield_edac.c +++ 
b/drivers/edac/bluefield_edac.c @@ -180,7 +180,7 @@ static void bluefield_edac_check(struct mem_ctl_info *mci) static void bluefield_edac_init_dimms(struct mem_ctl_info *mci) { struct bluefield_edac_priv *priv = mci->pvt_info; - int mem_ctrl_idx = mci->mc_idx; + u64 mem_ctrl_idx = mci->mc_idx; struct dimm_info *dimm; u64 smc_info, smc_arg; int is_empty = 1, i; diff --git a/drivers/edac/fsl_ddr_edac.c b/drivers/edac/fsl_ddr_edac.c index ac2102b25706..2fea9569927b 100644 --- a/drivers/edac/fsl_ddr_edac.c +++ b/drivers/edac/fsl_ddr_edac.c @@ -329,21 +329,25 @@ static void fsl_mc_check(struct mem_ctl_info *mci) * TODO: Add support for 32-bit wide buses */ if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) { + u64 cap = (u64)cap_high << 32 | cap_low; + u32 s = syndrome; + sbe_ecc_decode(cap_high, cap_low, syndrome, &bad_data_bit, &bad_ecc_bit); - if (bad_data_bit != -1) - fsl_mc_printk(mci, KERN_ERR, - "Faulty Data bit: %d\n", bad_data_bit); - if (bad_ecc_bit != -1) - fsl_mc_printk(mci, KERN_ERR, - "Faulty ECC bit: %d\n", bad_ecc_bit); + if (bad_data_bit >= 0) { + fsl_mc_printk(mci, KERN_ERR, "Faulty Data bit: %d\n", bad_data_bit); + cap ^= 1ULL << bad_data_bit; + } + + if (bad_ecc_bit >= 0) { + fsl_mc_printk(mci, KERN_ERR, "Faulty ECC bit: %d\n", bad_ecc_bit); + s ^= 1 << bad_ecc_bit; + } fsl_mc_printk(mci, KERN_ERR, "Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n", - cap_high ^ (1 << (bad_data_bit - 32)), - cap_low ^ (1 << bad_data_bit), - syndrome ^ (1 << bad_ecc_bit)); + upper_32_bits(cap), lower_32_bits(cap), s); } fsl_mc_printk(mci, KERN_ERR, diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c index 9ef13570f2e5..e3635fba63b4 100644 --- a/drivers/edac/ie31200_edac.c +++ b/drivers/edac/ie31200_edac.c @@ -91,8 +91,6 @@ (((did) & PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK) == \ PCI_DEVICE_ID_INTEL_IE31200_HB_CFL_MASK)) -#define IE31200_DIMMS 4 -#define IE31200_RANKS 8 #define IE31200_RANKS_PER_CHANNEL 4 #define IE31200_DIMMS_PER_CHANNEL 2 #define IE31200_CHANNELS 2 @@ -164,6 +162,7 @@ #define IE31200_MAD_DIMM_0_OFFSET 0x5004 #define IE31200_MAD_DIMM_0_OFFSET_SKL 0x500C #define IE31200_MAD_DIMM_SIZE GENMASK_ULL(7, 0) +#define IE31200_MAD_DIMM_SIZE_SKL GENMASK_ULL(5, 0) #define IE31200_MAD_DIMM_A_RANK BIT(17) #define IE31200_MAD_DIMM_A_RANK_SHIFT 17 #define IE31200_MAD_DIMM_A_RANK_SKL BIT(10) @@ -377,7 +376,7 @@ static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev) static void __skl_populate_dimm_info(struct dimm_data *dd, u32 addr_decode, int chan) { - dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE; + dd->size = (addr_decode >> (chan << 4)) & IE31200_MAD_DIMM_SIZE_SKL; dd->dual_rank = (addr_decode & (IE31200_MAD_DIMM_A_RANK_SKL << (chan << 4))) ? 1 : 0; dd->x16_width = ((addr_decode & (IE31200_MAD_DIMM_A_WIDTH_SKL << (chan << 4))) >> (IE31200_MAD_DIMM_A_WIDTH_SKL_SHIFT + (chan << 4))); @@ -406,10 +405,9 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) int i, j, ret; struct mem_ctl_info *mci = NULL; struct edac_mc_layer layers[2]; - struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL]; void __iomem *window; struct ie31200_priv *priv; - u32 addr_decode, mad_offset; + u32 addr_decode[IE31200_CHANNELS], mad_offset; /* * Kaby Lake, Coffee Lake seem to work like Skylake. 
Please re-visit @@ -426,7 +424,7 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) nr_channels = how_many_channels(pdev); layers[0].type = EDAC_MC_LAYER_CHIP_SELECT; - layers[0].size = IE31200_DIMMS; + layers[0].size = IE31200_RANKS_PER_CHANNEL; layers[0].is_virt_csrow = true; layers[1].type = EDAC_MC_LAYER_CHANNEL; layers[1].size = nr_channels; @@ -467,19 +465,10 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) mad_offset = IE31200_MAD_DIMM_0_OFFSET; } - /* populate DIMM info */ for (i = 0; i < IE31200_CHANNELS; i++) { - addr_decode = readl(window + mad_offset + + addr_decode[i] = readl(window + mad_offset + (i * 4)); - edac_dbg(0, "addr_decode: 0x%x\n", addr_decode); - for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) { - populate_dimm_info(&dimm_info[i][j], addr_decode, j, - skl); - edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n", - dimm_info[i][j].size, - dimm_info[i][j].dual_rank, - dimm_info[i][j].x16_width); - } + edac_dbg(0, "addr_decode: 0x%x\n", addr_decode[i]); } /* @@ -490,14 +479,22 @@ static int ie31200_probe1(struct pci_dev *pdev, int dev_idx) */ for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) { for (j = 0; j < IE31200_CHANNELS; j++) { + struct dimm_data dimm_info; struct dimm_info *dimm; unsigned long nr_pages; - nr_pages = IE31200_PAGES(dimm_info[j][i].size, skl); + populate_dimm_info(&dimm_info, addr_decode[j], i, + skl); + edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n", + dimm_info.size, + dimm_info.dual_rank, + dimm_info.x16_width); + + nr_pages = IE31200_PAGES(dimm_info.size, skl); if (nr_pages == 0) continue; - if (dimm_info[j][i].dual_rank) { + if (dimm_info.dual_rank) { nr_pages = nr_pages / 2; dimm = edac_get_dimm(mci, (i * 2) + 1, j, 0); dimm->nr_pages = nr_pages; @@ -618,7 +615,7 @@ static int __init ie31200_init(void) pci_rc = pci_register_driver(&ie31200_driver); if (pci_rc < 0) - goto fail0; + return pci_rc; if (!mci_pdev) { ie31200_registered = 0; @@ -629,11 +626,13 @@ static int __init ie31200_init(void) if (mci_pdev) break; } + if (!mci_pdev) { edac_dbg(0, "ie31200 pci_get_device fail\n"); pci_rc = -ENODEV; - goto fail1; + goto fail0; } + pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]); if (pci_rc < 0) { edac_dbg(0, "ie31200 init fail\n"); @@ -641,12 +640,12 @@ static int __init ie31200_init(void) goto fail1; } } - return 0; + return 0; fail1: - pci_unregister_driver(&ie31200_driver); -fail0: pci_dev_put(mci_pdev); +fail0: + pci_unregister_driver(&ie31200_driver); return pci_rc; } diff --git a/drivers/edac/igen6_edac.c b/drivers/edac/igen6_edac.c index 74c5aad1f608..0ab8642c4e55 100644 --- a/drivers/edac/igen6_edac.c +++ b/drivers/edac/igen6_edac.c @@ -1075,6 +1075,7 @@ static int igen6_register_mci(int mc, u64 mchbar, struct pci_dev *pdev) imc->mci = mci; return 0; fail3: + mci->pvt_info = NULL; kfree(mci->ctl_name); fail2: edac_mc_free(mci); @@ -1099,6 +1100,7 @@ static void igen6_unregister_mcis(void) edac_mc_del_mc(mci->pdev); kfree(mci->ctl_name); + mci->pvt_info = NULL; edac_mc_free(mci); iounmap(imc->window); } diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c index 518092d7eaf7..303d0871184a 100644 --- a/drivers/edac/qcom_edac.c +++ b/drivers/edac/qcom_edac.c @@ -95,7 +95,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b * Configure interrupt enable registers such that Tag, Data RAM related * interrupts are propagated to interrupt controller for servicing */ - ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable, + ret = 
regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable, TRP0_INTERRUPT_ENABLE, TRP0_INTERRUPT_ENABLE); if (ret) @@ -113,7 +113,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b if (ret) return ret; - ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable, + ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable, DRP0_INTERRUPT_ENABLE, DRP0_INTERRUPT_ENABLE); if (ret) diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 303d2f79e606..7b71610ad65c 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -139,7 +139,7 @@ config ISCSI_IBFT select ISCSI_BOOT_SYSFS select ISCSI_IBFT_FIND if X86 depends on ACPI && SCSI && SCSI_LOWLEVEL - default n + default n help This option enables support for detection and exposing of iSCSI Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index 248594b59c64..5bda5d7ade42 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -191,6 +191,7 @@ struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id, dev = &ffa_dev->dev; dev->bus = &ffa_bus_type; dev->release = ffa_release_device; + dev->dma_mask = &dev->coherent_dma_mask; dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id); ffa_dev->id = id; diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index e9f86b757301..e1e278d431e9 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -306,7 +306,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3, memcpy(buffer + idx, drv_info->rx_buffer + idx * sz, buf_sz); - ffa_rx_release(); + if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY)) + ffa_rx_release(); mutex_unlock(&drv_info->rx_lock); diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 35bb70724d44..8c6f99d15f22 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -73,6 +73,9 @@ struct scmi_device *scmi_child_dev_find(struct device *parent, if (!dev) return NULL; + /* Drop the refcnt bumped implicitly by device_find_child */ + put_device(dev); + return to_scmi_dev(dev); } diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 435d0e2658a4..3de25e9d18ef 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -627,6 +627,9 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) if (ret) return ERR_PTR(ret); + if (!buf.opp_count) + return ERR_PTR(-ENOENT); + info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) return ERR_PTR(-ENOMEM); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 28d4defc5d0c..9786dfff9f84 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -835,13 +835,15 @@ char * __init efi_md_typeattr_format(char *buf, size_t size, EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO | EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP | EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO | - EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE)) + EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE | + EFI_MEMORY_RUNTIME)) snprintf(pos, size, "|attr=0x%016llx]", (unsigned long long)attr); else snprintf(pos, size, - "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]", + "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]", attr & EFI_MEMORY_RUNTIME ? 
"RUN" : "", + attr & EFI_MEMORY_HOT_PLUGGABLE ? "HP" : "", attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "", attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "", attr & EFI_MEMORY_SP ? "SP" : "", diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index f3c7fa9bd8f3..f1af8c5136c2 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -7,7 +7,7 @@ # cflags-$(CONFIG_X86_32) := -march=i386 cflags-$(CONFIG_X86_64) := -mcmodel=small -cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ \ +cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \ -fPIC -fno-strict-aliasing -mno-red-zone \ -mno-mmx -mno-sse -fshort-wchar \ -Wno-pointer-sign \ diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c index fff826f56728..fc076055a215 100644 --- a/drivers/firmware/efi/libstub/randomalloc.c +++ b/drivers/firmware/efi/libstub/randomalloc.c @@ -25,6 +25,9 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md, if (md->type != EFI_CONVENTIONAL_MEMORY) return 0; + if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE) + return 0; + if (efi_soft_reserve_enabled() && (md->attribute & EFI_MEMORY_SP)) return 0; @@ -72,6 +75,10 @@ efi_status_t efi_random_alloc(unsigned long size, if (align < EFI_ALLOC_ALIGN) align = EFI_ALLOC_ALIGN; + /* Avoid address 0x0, as it can be mistaken for NULL */ + if (alloc_min == 0) + alloc_min = align; + size = round_up(size, EFI_ALLOC_ALIGN); /* count the suitable slots in each memory map entry */ diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c index bf6fbd5d22a1..713ee2de02cf 100644 --- a/drivers/firmware/efi/libstub/relocate.c +++ b/drivers/firmware/efi/libstub/relocate.c @@ -53,6 +53,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align, if (desc->type != EFI_CONVENTIONAL_MEMORY) continue; + if (desc->attribute & EFI_MEMORY_HOT_PLUGGABLE) + continue; + if (efi_soft_reserve_enabled() && (desc->attribute & EFI_MEMORY_SP)) continue; diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c index 456d0e5eaf78..f47968029983 100644 --- a/drivers/firmware/efi/sysfb_efi.c +++ b/drivers/firmware/efi/sysfb_efi.c @@ -91,6 +91,7 @@ void efifb_setup_from_dmi(struct screen_info *si, const char *opt) _ret_; \ }) +#ifdef CONFIG_EFI static int __init efifb_set_system(const struct dmi_system_id *id) { struct efifb_dmi_info *info = id->driver_data; @@ -346,7 +347,6 @@ static const struct fwnode_operations efifb_fwnode_ops = { .add_links = efifb_add_links, }; -#ifdef CONFIG_EFI static struct fwnode_handle efifb_fwnode; __init void sysfb_apply_efi_quirks(void) diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c index e8d69bd548f3..9c3613e6af15 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -40,7 +40,8 @@ int __init efi_tpm_eventlog_init(void) { struct linux_efi_tpm_eventlog *log_tbl; struct efi_tcg2_final_events_table *final_tbl; - int tbl_size; + unsigned int tbl_size; + int final_tbl_size; int ret = 0; if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) { @@ -80,26 +81,26 @@ int __init efi_tpm_eventlog_init(void) goto out; } - tbl_size = 0; + final_tbl_size = 0; if (final_tbl->nr_events != 0) { void *events = (void *)efi.tpm_final_log + sizeof(final_tbl->version) + sizeof(final_tbl->nr_events); - tbl_size = tpm2_calc_event_log_size(events, - final_tbl->nr_events, - log_tbl->log); + final_tbl_size = tpm2_calc_event_log_size(events, + final_tbl->nr_events, + 
log_tbl->log); } - if (tbl_size < 0) { + if (final_tbl_size < 0) { pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n"); ret = -EINVAL; goto out_calc; } memblock_reserve(efi.tpm_final_log, - tbl_size + sizeof(*final_tbl)); - efi_tpm_final_log_size = tbl_size; + final_tbl_size + sizeof(*final_tbl)); + efi_tpm_final_log_size = final_tbl_size; out_calc: early_memunmap(final_tbl, sizeof(*final_tbl)); diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c index 871bedf533a8..8c75308b01d5 100644 --- a/drivers/firmware/google/gsmi.c +++ b/drivers/firmware/google/gsmi.c @@ -918,7 +918,8 @@ static __init int gsmi_init(void) gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info); if (IS_ERR(gsmi_dev.pdev)) { printk(KERN_ERR "gsmi: unable to register platform device\n"); - return PTR_ERR(gsmi_dev.pdev); + ret = PTR_ERR(gsmi_dev.pdev); + goto out_unregister; } /* SMI access needs to be serialized */ @@ -1056,10 +1057,11 @@ out_err: gsmi_buf_free(gsmi_dev.name_buf); kmem_cache_destroy(gsmi_dev.mem_pool); platform_device_unregister(gsmi_dev.pdev); - pr_info("gsmi: failed to load: %d\n", ret); +out_unregister: #ifdef CONFIG_PM platform_driver_unregister(&gsmi_driver_info); #endif + pr_info("gsmi: failed to load: %d\n", ret); return ret; } diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c index dca79caccd01..fa25c082109a 100644 --- a/drivers/firmware/imx/imx-scu.c +++ b/drivers/firmware/imx/imx-scu.c @@ -279,6 +279,7 @@ static int imx_scu_probe(struct platform_device *pdev) return ret; sc_ipc->fast_ipc = of_device_is_compatible(args.np, "fsl,imx8-mu-scu"); + of_node_put(args.np); num_channel = sc_ipc->fast_ipc ? 2 : SCU_MU_CHAN_NUM; for (i = 0; i < num_channel; i++) { diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 6e9788324fea..371f24569b3b 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf) str += sprintf_ipaddr(str, nic->ip_addr); break; case ISCSI_BOOT_ETH_SUBNET_MASK: - val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1)); + if (nic->subnet_mask_prefix > 32) + val = cpu_to_be32(~0); + else + val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1)); str += sprintf(str, "%pI4", &val); break; case ISCSI_BOOT_ETH_PREFIX_LEN: diff --git a/drivers/firmware/smccc/smccc.c b/drivers/firmware/smccc/smccc.c index db818f9dcb8e..105cc7d9f4c3 100644 --- a/drivers/firmware/smccc/smccc.c +++ b/drivers/firmware/smccc/smccc.c @@ -16,7 +16,6 @@ static u32 smccc_version = ARM_SMCCC_VERSION_1_0; static enum arm_smccc_conduit smccc_conduit = SMCCC_CONDUIT_NONE; bool __ro_after_init smccc_trng_available = false; -u64 __ro_after_init smccc_has_sve_hint = false; s32 __ro_after_init smccc_soc_id_version = SMCCC_RET_NOT_SUPPORTED; s32 __ro_after_init smccc_soc_id_revision = SMCCC_RET_NOT_SUPPORTED; @@ -28,9 +27,6 @@ void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit) smccc_conduit = conduit; smccc_trng_available = smccc_probe_trng(); - if (IS_ENABLED(CONFIG_ARM64_SVE) && - smccc_version >= ARM_SMCCC_VERSION_1_3) - smccc_has_sve_hint = true; if ((smccc_version >= ARM_SMCCC_VERSION_1_2) && (smccc_conduit != SMCCC_CONDUIT_NONE)) { diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c index 4ffb9da537d8..5295ff90482b 100644 --- a/drivers/fpga/altera-cvp.c +++ b/drivers/fpga/altera-cvp.c @@ -52,7 +52,7 @@ /* V2 Defines */ #define VSE_CVP_TX_CREDITS 0x49 /* 8bit */ 
-#define V2_CREDIT_TIMEOUT_US 20000 +#define V2_CREDIT_TIMEOUT_US 40000 #define V2_CHECK_CREDIT_US 10 #define V2_POLL_TIMEOUT_US 1000000 #define V2_USER_TIMEOUT_US 500000 diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 833ce13ff6f8..698d6cbf782a 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -55,33 +55,26 @@ int fpga_bridge_disable(struct fpga_bridge *bridge) } EXPORT_SYMBOL_GPL(fpga_bridge_disable); -static struct fpga_bridge *__fpga_bridge_get(struct device *dev, +static struct fpga_bridge *__fpga_bridge_get(struct device *bridge_dev, struct fpga_image_info *info) { struct fpga_bridge *bridge; - int ret = -ENODEV; - bridge = to_fpga_bridge(dev); + bridge = to_fpga_bridge(bridge_dev); bridge->info = info; - if (!mutex_trylock(&bridge->mutex)) { - ret = -EBUSY; - goto err_dev; - } + if (!mutex_trylock(&bridge->mutex)) + return ERR_PTR(-EBUSY); - if (!try_module_get(dev->parent->driver->owner)) - goto err_ll_mod; + if (!try_module_get(bridge->br_ops_owner)) { + mutex_unlock(&bridge->mutex); + return ERR_PTR(-ENODEV); + } dev_dbg(&bridge->dev, "get\n"); return bridge; - -err_ll_mod: - mutex_unlock(&bridge->mutex); -err_dev: - put_device(dev); - return ERR_PTR(ret); } /** @@ -97,13 +90,18 @@ err_dev: struct fpga_bridge *of_fpga_bridge_get(struct device_node *np, struct fpga_image_info *info) { - struct device *dev; + struct fpga_bridge *bridge; + struct device *bridge_dev; - dev = class_find_device_by_of_node(fpga_bridge_class, np); - if (!dev) + bridge_dev = class_find_device_by_of_node(fpga_bridge_class, np); + if (!bridge_dev) return ERR_PTR(-ENODEV); - return __fpga_bridge_get(dev, info); + bridge = __fpga_bridge_get(bridge_dev, info); + if (IS_ERR(bridge)) + put_device(bridge_dev); + + return bridge; } EXPORT_SYMBOL_GPL(of_fpga_bridge_get); @@ -124,6 +122,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data) struct fpga_bridge *fpga_bridge_get(struct device *dev, struct fpga_image_info *info) { + struct fpga_bridge *bridge; struct device *bridge_dev; bridge_dev = class_find_device(fpga_bridge_class, NULL, dev, @@ -131,7 +130,11 @@ struct fpga_bridge *fpga_bridge_get(struct device *dev, if (!bridge_dev) return ERR_PTR(-ENODEV); - return __fpga_bridge_get(bridge_dev, info); + bridge = __fpga_bridge_get(bridge_dev, info); + if (IS_ERR(bridge)) + put_device(bridge_dev); + + return bridge; } EXPORT_SYMBOL_GPL(fpga_bridge_get); @@ -145,7 +148,7 @@ void fpga_bridge_put(struct fpga_bridge *bridge) dev_dbg(&bridge->dev, "put\n"); bridge->info = NULL; - module_put(bridge->dev.parent->driver->owner); + module_put(bridge->br_ops_owner); mutex_unlock(&bridge->mutex); put_device(&bridge->dev); } @@ -312,18 +315,19 @@ static struct attribute *fpga_bridge_attrs[] = { ATTRIBUTE_GROUPS(fpga_bridge); /** - * fpga_bridge_register - create and register an FPGA Bridge device + * __fpga_bridge_register - create and register an FPGA Bridge device * @parent: FPGA bridge device from pdev * @name: FPGA bridge name * @br_ops: pointer to structure of fpga bridge ops * @priv: FPGA bridge private data + * @owner: owner module containing the br_ops * * Return: struct fpga_bridge pointer or ERR_PTR() */ struct fpga_bridge * -fpga_bridge_register(struct device *parent, const char *name, - const struct fpga_bridge_ops *br_ops, - void *priv) +__fpga_bridge_register(struct device *parent, const char *name, + const struct fpga_bridge_ops *br_ops, + void *priv, struct module *owner) { struct fpga_bridge *bridge; int id, ret; @@ -353,6 +357,7 @@ 
fpga_bridge_register(struct device *parent, const char *name, bridge->name = name; bridge->br_ops = br_ops; + bridge->br_ops_owner = owner; bridge->priv = priv; bridge->dev.groups = br_ops->groups; @@ -382,7 +387,7 @@ error_kfree: return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(fpga_bridge_register); +EXPORT_SYMBOL_GPL(__fpga_bridge_register); /** * fpga_bridge_unregister - unregister an FPGA bridge diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index 8efa67620e21..0c71d91ba7f6 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -664,20 +664,16 @@ static struct attribute *fpga_mgr_attrs[] = { }; ATTRIBUTE_GROUPS(fpga_mgr); -static struct fpga_manager *__fpga_mgr_get(struct device *dev) +static struct fpga_manager *__fpga_mgr_get(struct device *mgr_dev) { struct fpga_manager *mgr; - mgr = to_fpga_manager(dev); + mgr = to_fpga_manager(mgr_dev); - if (!try_module_get(dev->parent->driver->owner)) - goto err_dev; + if (!try_module_get(mgr->mops_owner)) + mgr = ERR_PTR(-ENODEV); return mgr; - -err_dev: - put_device(dev); - return ERR_PTR(-ENODEV); } static int fpga_mgr_dev_match(struct device *dev, const void *data) @@ -693,12 +689,18 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data) */ struct fpga_manager *fpga_mgr_get(struct device *dev) { - struct device *mgr_dev = class_find_device(fpga_mgr_class, NULL, dev, - fpga_mgr_dev_match); + struct fpga_manager *mgr; + struct device *mgr_dev; + + mgr_dev = class_find_device(fpga_mgr_class, NULL, dev, fpga_mgr_dev_match); if (!mgr_dev) return ERR_PTR(-ENODEV); - return __fpga_mgr_get(mgr_dev); + mgr = __fpga_mgr_get(mgr_dev); + if (IS_ERR(mgr)) + put_device(mgr_dev); + + return mgr; } EXPORT_SYMBOL_GPL(fpga_mgr_get); @@ -711,13 +713,18 @@ EXPORT_SYMBOL_GPL(fpga_mgr_get); */ struct fpga_manager *of_fpga_mgr_get(struct device_node *node) { - struct device *dev; + struct fpga_manager *mgr; + struct device *mgr_dev; - dev = class_find_device_by_of_node(fpga_mgr_class, node); - if (!dev) + mgr_dev = class_find_device_by_of_node(fpga_mgr_class, node); + if (!mgr_dev) return ERR_PTR(-ENODEV); - return __fpga_mgr_get(dev); + mgr = __fpga_mgr_get(mgr_dev); + if (IS_ERR(mgr)) + put_device(mgr_dev); + + return mgr; } EXPORT_SYMBOL_GPL(of_fpga_mgr_get); @@ -727,7 +734,7 @@ EXPORT_SYMBOL_GPL(of_fpga_mgr_get); */ void fpga_mgr_put(struct fpga_manager *mgr) { - module_put(mgr->dev.parent->driver->owner); + module_put(mgr->mops_owner); put_device(&mgr->dev); } EXPORT_SYMBOL_GPL(fpga_mgr_put); @@ -766,9 +773,10 @@ void fpga_mgr_unlock(struct fpga_manager *mgr) EXPORT_SYMBOL_GPL(fpga_mgr_unlock); /** - * fpga_mgr_register_full - create and register an FPGA Manager device + * __fpga_mgr_register_full - create and register an FPGA Manager device * @parent: fpga manager device from pdev * @info: parameters for fpga manager + * @owner: owner module containing the ops * * The caller of this function is responsible for calling fpga_mgr_unregister(). * Using devm_fpga_mgr_register_full() instead is recommended. 
@@ -776,7 +784,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unlock); * Return: pointer to struct fpga_manager pointer or ERR_PTR() */ struct fpga_manager * -fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info) +__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner) { const struct fpga_manager_ops *mops = info->mops; struct fpga_manager *mgr; @@ -804,6 +813,8 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in mutex_init(&mgr->ref_mutex); + mgr->mops_owner = owner; + mgr->name = info->name; mgr->mops = info->mops; mgr->priv = info->priv; @@ -841,14 +852,15 @@ error_kfree: return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(fpga_mgr_register_full); +EXPORT_SYMBOL_GPL(__fpga_mgr_register_full); /** - * fpga_mgr_register - create and register an FPGA Manager device + * __fpga_mgr_register - create and register an FPGA Manager device * @parent: fpga manager device from pdev * @name: fpga manager name * @mops: pointer to structure of fpga manager ops * @priv: fpga manager private data + * @owner: owner module containing the ops * * The caller of this function is responsible for calling fpga_mgr_unregister(). * Using devm_fpga_mgr_register() instead is recommended. This simple @@ -859,8 +871,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_register_full); * Return: pointer to struct fpga_manager pointer or ERR_PTR() */ struct fpga_manager * -fpga_mgr_register(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, void *priv) +__fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, struct module *owner) { struct fpga_manager_info info = { 0 }; @@ -868,9 +880,9 @@ fpga_mgr_register(struct device *parent, const char *name, info.mops = mops; info.priv = priv; - return fpga_mgr_register_full(parent, &info); + return __fpga_mgr_register_full(parent, &info, owner); } -EXPORT_SYMBOL_GPL(fpga_mgr_register); +EXPORT_SYMBOL_GPL(__fpga_mgr_register); /** * fpga_mgr_unregister - unregister an FPGA manager @@ -900,9 +912,10 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res) } /** - * devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register() + * __devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register() * @parent: fpga manager device from pdev * @info: parameters for fpga manager + * @owner: owner module containing the ops * * Return: fpga manager pointer on success, negative error code otherwise. * @@ -910,7 +923,8 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res) * function will be called automatically when the managing device is detached. 
*/ struct fpga_manager * -devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info) +__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner) { struct fpga_mgr_devres *dr; struct fpga_manager *mgr; @@ -919,7 +933,7 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf if (!dr) return ERR_PTR(-ENOMEM); - mgr = fpga_mgr_register_full(parent, info); + mgr = __fpga_mgr_register_full(parent, info, owner); if (IS_ERR(mgr)) { devres_free(dr); return mgr; @@ -930,14 +944,15 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf return mgr; } -EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full); +EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register_full); /** - * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register() + * __devm_fpga_mgr_register - resource managed variant of fpga_mgr_register() * @parent: fpga manager device from pdev * @name: fpga manager name * @mops: pointer to structure of fpga manager ops * @priv: fpga manager private data + * @owner: owner module containing the ops * * Return: fpga manager pointer on success, negative error code otherwise. * @@ -946,8 +961,9 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full); * device is detached. */ struct fpga_manager * -devm_fpga_mgr_register(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, void *priv) +__devm_fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, + struct module *owner) { struct fpga_manager_info info = { 0 }; @@ -955,9 +971,9 @@ devm_fpga_mgr_register(struct device *parent, const char *name, info.mops = mops; info.priv = priv; - return devm_fpga_mgr_register_full(parent, &info); + return __devm_fpga_mgr_register_full(parent, &info, owner); } -EXPORT_SYMBOL_GPL(devm_fpga_mgr_register); +EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register); static void fpga_mgr_dev_release(struct device *dev) { diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c index 0cb2664085cf..836e21fde028 100644 --- a/drivers/gpio/gpio-aggregator.c +++ b/drivers/gpio/gpio-aggregator.c @@ -116,10 +116,15 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf, struct platform_device *pdev; int res, id; + if (!try_module_get(THIS_MODULE)) + return -ENOENT; + /* kernfs guarantees string termination, so count + 1 is safe */ aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL); - if (!aggr) - return -ENOMEM; + if (!aggr) { + res = -ENOMEM; + goto put_module; + } memcpy(aggr->args, buf, count + 1); @@ -158,6 +163,7 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf, } aggr->pdev = pdev; + module_put(THIS_MODULE); return count; remove_table: @@ -172,6 +178,8 @@ free_table: kfree(aggr->lookups); free_ga: kfree(aggr); +put_module: + module_put(THIS_MODULE); return res; } @@ -200,13 +208,19 @@ static ssize_t delete_device_store(struct device_driver *driver, if (error) return error; + if (!try_module_get(THIS_MODULE)) + return -ENOENT; + mutex_lock(&gpio_aggregator_lock); aggr = idr_remove(&gpio_aggregator_idr, id); mutex_unlock(&gpio_aggregator_lock); - if (!aggr) + if (!aggr) { + module_put(THIS_MODULE); return -ENOENT; + } gpio_aggregator_free(aggr); + module_put(THIS_MODULE); return count; } static DRIVER_ATTR_WO(delete_device); diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 70770429ba48..3373e46e3ba0 100644 --- 
a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -68,6 +68,22 @@ struct bcm_kona_gpio { struct bcm_kona_gpio_bank { int id; int irq; + /* + * Used to keep track of lock/unlock operations for each GPIO in the + * bank. + * + * All GPIOs are locked by default (see bcm_kona_gpio_reset), and the + * unlock count for all GPIOs is 0 by default. Each unlock increments + * the counter, and each lock decrements the counter. + * + * The lock function only locks the GPIO once its unlock counter is + * down to 0. This is necessary because the GPIO is unlocked in two + * places in this driver: once for requested GPIOs, and once for + * requested IRQs. Since it is possible for a GPIO to be requested + * as both a GPIO and an IRQ, we need to ensure that we don't lock it + * too early. + */ + u8 gpio_unlock_count[GPIO_PER_BANK]; /* Used in the interrupt handler */ struct bcm_kona_gpio *kona_gpio; }; @@ -85,14 +101,24 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio, u32 val; unsigned long flags; int bank_id = GPIO_BANK(gpio); + int bit = GPIO_BIT(gpio); + struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id]; - raw_spin_lock_irqsave(&kona_gpio->lock, flags); + if (bank->gpio_unlock_count[bit] == 0) { + dev_err(kona_gpio->gpio_chip.parent, + "Unbalanced locks for GPIO %u\n", gpio); + return; + } - val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id)); - val |= BIT(gpio); - bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val); + if (--bank->gpio_unlock_count[bit] == 0) { + raw_spin_lock_irqsave(&kona_gpio->lock, flags); - raw_spin_unlock_irqrestore(&kona_gpio->lock, flags); + val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id)); + val |= BIT(bit); + bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val); + + raw_spin_unlock_irqrestore(&kona_gpio->lock, flags); + } } static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio, @@ -101,14 +127,20 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio, u32 val; unsigned long flags; int bank_id = GPIO_BANK(gpio); + int bit = GPIO_BIT(gpio); + struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id]; - raw_spin_lock_irqsave(&kona_gpio->lock, flags); + if (bank->gpio_unlock_count[bit] == 0) { + raw_spin_lock_irqsave(&kona_gpio->lock, flags); - val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id)); - val &= ~BIT(gpio); - bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val); + val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id)); + val &= ~BIT(bit); + bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val); - raw_spin_unlock_irqrestore(&kona_gpio->lock, flags); + raw_spin_unlock_irqrestore(&kona_gpio->lock, flags); + } + + ++bank->gpio_unlock_count[bit]; } static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio) @@ -359,6 +391,7 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d) kona_gpio = irq_data_get_irq_chip_data(d); reg_base = kona_gpio->reg_base; + raw_spin_lock_irqsave(&kona_gpio->lock, flags); val = readl(reg_base + GPIO_INT_MASK(bank_id)); @@ -381,6 +414,7 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d) kona_gpio = irq_data_get_irq_chip_data(d); reg_base = kona_gpio->reg_base; + raw_spin_lock_irqsave(&kona_gpio->lock, flags); val = readl(reg_base + GPIO_INT_MSKCLR(bank_id)); @@ -476,15 +510,26 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc) static int bcm_kona_gpio_irq_reqres(struct irq_data *d) { struct bcm_kona_gpio *kona_gpio = 
irq_data_get_irq_chip_data(d); + unsigned int gpio = d->hwirq; - return gpiochip_reqres_irq(&kona_gpio->gpio_chip, d->hwirq); + /* + * We need to unlock the GPIO before any other operations are performed + * on the relevant GPIO configuration registers + */ + bcm_kona_gpio_unlock_gpio(kona_gpio, gpio); + + return gpiochip_reqres_irq(&kona_gpio->gpio_chip, gpio); } static void bcm_kona_gpio_irq_relres(struct irq_data *d) { struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d); + unsigned int gpio = d->hwirq; - gpiochip_relres_irq(&kona_gpio->gpio_chip, d->hwirq); + /* Once we no longer use it, lock the GPIO again */ + bcm_kona_gpio_lock_gpio(kona_gpio, gpio); + + gpiochip_relres_irq(&kona_gpio->gpio_chip, gpio); } static struct irq_chip bcm_gpio_irq_chip = { @@ -622,7 +667,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev) bank->irq = platform_get_irq(pdev, i); bank->kona_gpio = kona_gpio; if (bank->irq < 0) { - dev_err(dev, "Couldn't get IRQ for bank %d", i); + dev_err(dev, "Couldn't get IRQ for bank %d\n", i); ret = -ENOENT; goto err_irq_domain; } diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c index 482f678c893e..e641bc96c4e7 100644 --- a/drivers/gpio/gpio-exar.c +++ b/drivers/gpio/gpio-exar.c @@ -99,11 +99,13 @@ static void exar_set_value(struct gpio_chip *chip, unsigned int offset, struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip); unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset); unsigned int bit = exar_offset_to_bit(exar_gpio, offset); + unsigned int bit_value = value ? BIT(bit) : 0; - if (value) - regmap_set_bits(exar_gpio->regmap, addr, BIT(bit)); - else - regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit)); + /* + * regmap_write_bits() forces value to be written when an external + * pull up/down might otherwise indicate value was already set. 
+ */ + regmap_write_bits(exar_gpio->regmap, addr, BIT(bit), bit_value); } static int exar_direction_output(struct gpio_chip *chip, unsigned int offset, diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index bea0e32c195d..b7a9e5edd566 100644 --- a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c @@ -328,6 +328,7 @@ static const struct irq_domain_ops grgpio_irq_domain_ops = { static int grgpio_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; + struct device *dev = &ofdev->dev; void __iomem *regs; struct gpio_chip *gc; struct grgpio_priv *priv; @@ -337,7 +338,7 @@ static int grgpio_probe(struct platform_device *ofdev) int size; int i; - priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -346,28 +347,31 @@ static int grgpio_probe(struct platform_device *ofdev) return PTR_ERR(regs); gc = &priv->gc; - err = bgpio_init(gc, &ofdev->dev, 4, regs + GRGPIO_DATA, + err = bgpio_init(gc, dev, 4, regs + GRGPIO_DATA, regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL, BGPIOF_BIG_ENDIAN_BYTE_ORDER); if (err) { - dev_err(&ofdev->dev, "bgpio_init() failed\n"); + dev_err(dev, "bgpio_init() failed\n"); return err; } priv->regs = regs; priv->imask = gc->read_reg(regs + GRGPIO_IMASK); - priv->dev = &ofdev->dev; + priv->dev = dev; gc->owner = THIS_MODULE; gc->to_irq = grgpio_to_irq; - gc->label = devm_kasprintf(&ofdev->dev, GFP_KERNEL, "%pOF", np); + gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np); + if (!gc->label) + return -ENOMEM; + gc->base = -1; err = of_property_read_u32(np, "nbits", &prop); if (err || prop <= 0 || prop > GRGPIO_MAX_NGPIO) { gc->ngpio = GRGPIO_MAX_NGPIO; - dev_dbg(&ofdev->dev, - "No or invalid nbits property: assume %d\n", gc->ngpio); + dev_dbg(dev, "No or invalid nbits property: assume %d\n", + gc->ngpio); } else { gc->ngpio = prop; } @@ -379,7 +383,7 @@ static int grgpio_probe(struct platform_device *ofdev) irqmap = (s32 *)of_get_property(np, "irqmap", &size); if (irqmap) { if (size < gc->ngpio) { - dev_err(&ofdev->dev, + dev_err(dev, "irqmap shorter than ngpio (%d < %d)\n", size, gc->ngpio); return -EINVAL; @@ -389,7 +393,7 @@ static int grgpio_probe(struct platform_device *ofdev) &grgpio_irq_domain_ops, priv); if (!priv->domain) { - dev_err(&ofdev->dev, "Could not add irq domain\n"); + dev_err(dev, "Could not add irq domain\n"); return -EINVAL; } @@ -419,13 +423,13 @@ static int grgpio_probe(struct platform_device *ofdev) err = gpiochip_add_data(gc, priv); if (err) { - dev_err(&ofdev->dev, "Could not add gpiochip\n"); + dev_err(dev, "Could not add gpiochip\n"); if (priv->domain) irq_domain_remove(priv->domain); return err; } - dev_info(&ofdev->dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n", + dev_info(dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n", priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off"); return 0; diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index 853d9aa6b3b1..d456077c74f2 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -445,8 +445,7 @@ static int mxc_gpio_probe(struct platform_device *pdev) port->gc.request = gpiochip_generic_request; port->gc.free = gpiochip_generic_free; port->gc.to_irq = mxc_gpio_to_irq; - port->gc.base = (pdev->id < 0) ? 
of_alias_get_id(np, "gpio") * 32 : - pdev->id * 32; + port->gc.base = of_alias_get_id(np, "gpio") * 32; err = devm_gpiochip_add_data(&pdev->dev, &port->gc, port); if (err) diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 9ce54bf2030d..f81d79a297a5 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -10,8 +10,9 @@ #include #include -#include +#include #include +#include #include #include #include @@ -20,6 +21,7 @@ #include #include #include +#include #include #include @@ -522,12 +524,10 @@ static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off) struct pca953x_chip *chip = gpiochip_get_data(gc); u8 dirreg = chip->recalc_addr(chip, chip->regs->direction, off); u8 bit = BIT(off % BANK_SZ); - int ret; - mutex_lock(&chip->i2c_lock); - ret = regmap_write_bits(chip->regmap, dirreg, bit, bit); - mutex_unlock(&chip->i2c_lock); - return ret; + guard(mutex)(&chip->i2c_lock); + + return regmap_write_bits(chip->regmap, dirreg, bit, bit); } static int pca953x_gpio_direction_output(struct gpio_chip *gc, @@ -539,17 +539,15 @@ static int pca953x_gpio_direction_output(struct gpio_chip *gc, u8 bit = BIT(off % BANK_SZ); int ret; - mutex_lock(&chip->i2c_lock); + guard(mutex)(&chip->i2c_lock); + /* set output level */ ret = regmap_write_bits(chip->regmap, outreg, bit, val ? bit : 0); if (ret) - goto exit; + return ret; /* then direction */ - ret = regmap_write_bits(chip->regmap, dirreg, bit, 0); -exit: - mutex_unlock(&chip->i2c_lock); - return ret; + return regmap_write_bits(chip->regmap, dirreg, bit, 0); } static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) @@ -560,9 +558,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) u32 reg_val; int ret; - mutex_lock(&chip->i2c_lock); - ret = regmap_read(chip->regmap, inreg, ®_val); - mutex_unlock(&chip->i2c_lock); + scoped_guard(mutex, &chip->i2c_lock) + ret = regmap_read(chip->regmap, inreg, ®_val); if (ret < 0) return ret; @@ -575,9 +572,9 @@ static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) u8 outreg = chip->recalc_addr(chip, chip->regs->output, off); u8 bit = BIT(off % BANK_SZ); - mutex_lock(&chip->i2c_lock); + guard(mutex)(&chip->i2c_lock); + regmap_write_bits(chip->regmap, outreg, bit, val ? 
bit : 0); - mutex_unlock(&chip->i2c_lock); } static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off) @@ -588,9 +585,8 @@ static int pca953x_gpio_get_direction(struct gpio_chip *gc, unsigned off) u32 reg_val; int ret; - mutex_lock(&chip->i2c_lock); - ret = regmap_read(chip->regmap, dirreg, ®_val); - mutex_unlock(&chip->i2c_lock); + scoped_guard(mutex, &chip->i2c_lock) + ret = regmap_read(chip->regmap, dirreg, ®_val); if (ret < 0) return ret; @@ -607,9 +603,8 @@ static int pca953x_gpio_get_multiple(struct gpio_chip *gc, DECLARE_BITMAP(reg_val, MAX_LINE); int ret; - mutex_lock(&chip->i2c_lock); - ret = pca953x_read_regs(chip, chip->regs->input, reg_val); - mutex_unlock(&chip->i2c_lock); + scoped_guard(mutex, &chip->i2c_lock) + ret = pca953x_read_regs(chip, chip->regs->input, reg_val); if (ret) return ret; @@ -624,16 +619,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc, DECLARE_BITMAP(reg_val, MAX_LINE); int ret; - mutex_lock(&chip->i2c_lock); + guard(mutex)(&chip->i2c_lock); + ret = pca953x_read_regs(chip, chip->regs->output, reg_val); if (ret) - goto exit; + return; bitmap_replace(reg_val, reg_val, bits, mask, gc->ngpio); pca953x_write_regs(chip, chip->regs->output, reg_val); -exit: - mutex_unlock(&chip->i2c_lock); } static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip, @@ -641,7 +635,6 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip, unsigned long config) { enum pin_config_param param = pinconf_to_config_param(config); - u8 pull_en_reg = chip->recalc_addr(chip, PCAL953X_PULL_EN, offset); u8 pull_sel_reg = chip->recalc_addr(chip, PCAL953X_PULL_SEL, offset); u8 bit = BIT(offset % BANK_SZ); @@ -654,7 +647,7 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip, if (!(chip->driver_data & PCA_PCAL)) return -ENOTSUPP; - mutex_lock(&chip->i2c_lock); + guard(mutex)(&chip->i2c_lock); /* Configure pull-up/pull-down */ if (param == PIN_CONFIG_BIAS_PULL_UP) @@ -664,17 +657,13 @@ static int pca953x_gpio_set_pull_up_down(struct pca953x_chip *chip, else ret = 0; if (ret) - goto exit; + return ret; /* Disable/Enable pull-up/pull-down */ if (param == PIN_CONFIG_BIAS_DISABLE) - ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, 0); + return regmap_write_bits(chip->regmap, pull_en_reg, bit, 0); else - ret = regmap_write_bits(chip->regmap, pull_en_reg, bit, bit); - -exit: - mutex_unlock(&chip->i2c_lock); - return ret; + return regmap_write_bits(chip->regmap, pull_en_reg, bit, bit); } static int pca953x_gpio_set_config(struct gpio_chip *gc, unsigned int offset, @@ -851,25 +840,6 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin DECLARE_BITMAP(trigger, MAX_LINE); int ret; - if (chip->driver_data & PCA_PCAL) { - /* Read the current interrupt status from the device */ - ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger); - if (ret) - return false; - - /* Check latched inputs and clear interrupt status */ - ret = pca953x_read_regs(chip, chip->regs->input, cur_stat); - if (ret) - return false; - - /* Apply filter for rising/falling edge selection */ - bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio); - - bitmap_and(pending, new_stat, trigger, gc->ngpio); - - return !bitmap_empty(pending, gc->ngpio); - } - ret = pca953x_read_regs(chip, chip->regs->input, cur_stat); if (ret) return false; @@ -906,10 +876,8 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid) bitmap_zero(pending, MAX_LINE); - mutex_lock(&chip->i2c_lock); - ret = 
pca953x_irq_pending(chip, pending); - mutex_unlock(&chip->i2c_lock); - + scoped_guard(mutex, &chip->i2c_lock) + ret = pca953x_irq_pending(chip, pending); if (ret) { ret = 0; @@ -1218,9 +1186,9 @@ static void pca953x_remove(struct i2c_client *client) } #ifdef CONFIG_PM_SLEEP -static int pca953x_regcache_sync(struct device *dev) +static int pca953x_regcache_sync(struct pca953x_chip *chip) { - struct pca953x_chip *chip = dev_get_drvdata(dev); + struct device *dev = &chip->client->dev; int ret; u8 regaddr; @@ -1267,13 +1235,38 @@ static int pca953x_regcache_sync(struct device *dev) return 0; } +static int pca953x_restore_context(struct pca953x_chip *chip) +{ + int ret; + + guard(mutex)(&chip->i2c_lock); + + if (chip->client->irq > 0) + enable_irq(chip->client->irq); + regcache_cache_only(chip->regmap, false); + regcache_mark_dirty(chip->regmap); + ret = pca953x_regcache_sync(chip); + if (ret) + return ret; + + return regcache_sync(chip->regmap); +} + +static void pca953x_save_context(struct pca953x_chip *chip) +{ + guard(mutex)(&chip->i2c_lock); + + /* Disable IRQ to prevent early triggering while regmap "cache only" is on */ + if (chip->client->irq > 0) + disable_irq(chip->client->irq); + regcache_cache_only(chip->regmap, true); +} + static int pca953x_suspend(struct device *dev) { struct pca953x_chip *chip = dev_get_drvdata(dev); - mutex_lock(&chip->i2c_lock); - regcache_cache_only(chip->regmap, true); - mutex_unlock(&chip->i2c_lock); + pca953x_save_context(chip); if (atomic_read(&chip->wakeup_path)) device_set_wakeup_path(dev); @@ -1296,17 +1289,7 @@ static int pca953x_resume(struct device *dev) } } - mutex_lock(&chip->i2c_lock); - regcache_cache_only(chip->regmap, false); - regcache_mark_dirty(chip->regmap); - ret = pca953x_regcache_sync(dev); - if (ret) { - mutex_unlock(&chip->i2c_lock); - return ret; - } - - ret = regcache_sync(chip->regmap); - mutex_unlock(&chip->i2c_lock); + ret = pca953x_restore_context(chip); if (ret) { dev_err(dev, "Failed to restore register map: %d\n", ret); return ret; diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 5b117f3bd322..ab200e625010 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -41,7 +41,7 @@ struct gpio_rcar_info { struct gpio_rcar_priv { void __iomem *base; - spinlock_t lock; + raw_spinlock_t lock; struct device *dev; struct gpio_chip gpio_chip; unsigned int irq_parent; @@ -124,7 +124,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p, * "Setting Level-Sensitive Interrupt Input Mode" */ - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); /* Configure positive or negative logic in POSNEG */ gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge); @@ -143,7 +143,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p, if (!level_trigger) gpio_rcar_write(p, INTCLR, BIT(hwirq)); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type) @@ -247,7 +247,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip, * "Setting General Input Mode" */ - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); /* Configure positive logic in POSNEG */ gpio_rcar_modify_bit(p, POSNEG, gpio, false); @@ -262,7 +262,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip, if (p->info.has_outdtsel && output) gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false); - 
spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset) @@ -348,7 +348,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask, return 0; } - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); outputs = gpio_rcar_read(p, INOUTSEL); m = outputs & bankmask; if (m) @@ -357,7 +357,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask, m = ~outputs & bankmask; if (m) val |= gpio_rcar_read(p, INDT) & m; - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); bits[0] = val; return 0; @@ -368,9 +368,9 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value) struct gpio_rcar_priv *p = gpiochip_get_data(chip); unsigned long flags; - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); gpio_rcar_modify_bit(p, OUTDT, offset, value); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask, @@ -387,12 +387,12 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask, if (!bankmask) return; - spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&p->lock, flags); val = gpio_rcar_read(p, OUTDT); val &= ~bankmask; val |= (bankmask & bits[0]); gpio_rcar_write(p, OUTDT, val); - spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&p->lock, flags); } static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset, @@ -469,7 +469,12 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins) p->info = *info; ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args); - *npins = ret == 0 ? 
args.args[2] : RCAR_MAX_GPIO_PER_BANK; + if (ret) { + *npins = RCAR_MAX_GPIO_PER_BANK; + } else { + *npins = args.args[2]; + of_node_put(args.np); + } if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) { dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n", @@ -506,7 +511,7 @@ static int gpio_rcar_probe(struct platform_device *pdev) return -ENOMEM; p->dev = dev; - spin_lock_init(&p->lock); + raw_spin_lock_init(&p->lock); /* Get device configuration from DT node */ ret = gpio_rcar_parse_dt(p, &npins); diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c index 0fa4f0a93378..0ef21752b05b 100644 --- a/drivers/gpio/gpio-stmpe.c +++ b/drivers/gpio/gpio-stmpe.c @@ -191,7 +191,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d) [REG_IE][CSB] = STMPE_IDX_IEGPIOR_CSB, [REG_IE][MSB] = STMPE_IDX_IEGPIOR_MSB, }; - int i, j; + int ret, i, j; /* * STMPE1600: to be able to get IRQ from pins, @@ -199,8 +199,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d) * GPSR or GPCR registers */ if (stmpe->partnum == STMPE1600) { - stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]); - stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]); + ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]); + if (ret < 0) { + dev_err(stmpe->dev, "Failed to read GPMR_LSB: %d\n", ret); + goto err; + } + ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]); + if (ret < 0) { + dev_err(stmpe->dev, "Failed to read GPMR_CSB: %d\n", ret); + goto err; + } } for (i = 0; i < CACHE_NR_REGS; i++) { @@ -222,6 +230,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d) } } +err: mutex_unlock(&stmpe_gpio->irq_lock); } diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index 54d9fa7da9c1..7185554b025e 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -753,6 +753,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev) struct gpio_irq_chip *irq; struct tegra_gpio *gpio; struct device_node *np; + struct resource *res; char **names; int err; @@ -772,19 +773,19 @@ static int tegra186_gpio_probe(struct platform_device *pdev) gpio->num_banks++; /* get register apertures */ - gpio->secure = devm_platform_ioremap_resource_byname(pdev, "security"); - if (IS_ERR(gpio->secure)) { - gpio->secure = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(gpio->secure)) - return PTR_ERR(gpio->secure); - } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "security"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + gpio->secure = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->secure)) + return PTR_ERR(gpio->secure); - gpio->base = devm_platform_ioremap_resource_byname(pdev, "gpio"); - if (IS_ERR(gpio->base)) { - gpio->base = devm_platform_ioremap_resource(pdev, 1); - if (IS_ERR(gpio->base)) - return PTR_ERR(gpio->base); - } + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gpio"); + if (!res) + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + gpio->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(gpio->base)) + return PTR_ERR(gpio->base); err = platform_irq_count(pdev); if (err < 0) diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c index 2fc6b6ff7f16..8f094c898002 100644 --- a/drivers/gpio/gpio-xilinx.c +++ b/drivers/gpio/gpio-xilinx.c @@ -52,7 +52,6 @@ * @dir: GPIO direction shadow register * @gpio_lock: Lock used for synchronization * @irq: IRQ used by GPIO device - * @irqchip: IRQ chip * @enable: GPIO IRQ enable/disable bitfield * 
@rising_edge: GPIO IRQ rising edge enable/disable bitfield * @falling_edge: GPIO IRQ falling edge enable/disable bitfield @@ -66,9 +65,8 @@ struct xgpio_instance { DECLARE_BITMAP(state, 64); DECLARE_BITMAP(last_irq_read, 64); DECLARE_BITMAP(dir, 64); - spinlock_t gpio_lock; /* For serializing operations */ + raw_spinlock_t gpio_lock; /* For serializing operations */ int irq; - struct irq_chip irqchip; DECLARE_BITMAP(enable, 64); DECLARE_BITMAP(rising_edge, 64); DECLARE_BITMAP(falling_edge, 64); @@ -181,14 +179,14 @@ static void xgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) struct xgpio_instance *chip = gpiochip_get_data(gc); int bit = xgpio_to_bit(chip, gpio); - spin_lock_irqsave(&chip->gpio_lock, flags); + raw_spin_lock_irqsave(&chip->gpio_lock, flags); /* Write to GPIO signal and set its direction to output */ __assign_bit(bit, chip->state, val); xgpio_write_ch(chip, XGPIO_DATA_OFFSET, bit, chip->state); - spin_unlock_irqrestore(&chip->gpio_lock, flags); + raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); } /** @@ -212,7 +210,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, bitmap_remap(hw_mask, mask, chip->sw_map, chip->hw_map, 64); bitmap_remap(hw_bits, bits, chip->sw_map, chip->hw_map, 64); - spin_lock_irqsave(&chip->gpio_lock, flags); + raw_spin_lock_irqsave(&chip->gpio_lock, flags); bitmap_replace(state, chip->state, hw_bits, hw_mask, 64); @@ -220,7 +218,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask, bitmap_copy(chip->state, state, 64); - spin_unlock_irqrestore(&chip->gpio_lock, flags); + raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); } /** @@ -238,13 +236,13 @@ static int xgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) struct xgpio_instance *chip = gpiochip_get_data(gc); int bit = xgpio_to_bit(chip, gpio); - spin_lock_irqsave(&chip->gpio_lock, flags); + raw_spin_lock_irqsave(&chip->gpio_lock, flags); /* Set the GPIO bit in shadow register and set direction as input */ __set_bit(bit, chip->dir); xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir); - spin_unlock_irqrestore(&chip->gpio_lock, flags); + raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); return 0; } @@ -267,7 +265,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) struct xgpio_instance *chip = gpiochip_get_data(gc); int bit = xgpio_to_bit(chip, gpio); - spin_lock_irqsave(&chip->gpio_lock, flags); + raw_spin_lock_irqsave(&chip->gpio_lock, flags); /* Write state of GPIO signal */ __assign_bit(bit, chip->state, val); @@ -277,7 +275,7 @@ static int xgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) __clear_bit(bit, chip->dir); xgpio_write_ch(chip, XGPIO_TRI_OFFSET, bit, chip->dir); - spin_unlock_irqrestore(&chip->gpio_lock, flags); + raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); return 0; } @@ -405,7 +403,7 @@ static void xgpio_irq_mask(struct irq_data *irq_data) int bit = xgpio_to_bit(chip, irq_offset); u32 mask = BIT(bit / 32), temp; - spin_lock_irqsave(&chip->gpio_lock, flags); + raw_spin_lock_irqsave(&chip->gpio_lock, flags); __clear_bit(bit, chip->enable); @@ -415,7 +413,9 @@ static void xgpio_irq_mask(struct irq_data *irq_data) temp &= ~mask; xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, temp); } - spin_unlock_irqrestore(&chip->gpio_lock, flags); + raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); + + gpiochip_disable_irq(&chip->gc, irq_offset); } /** @@ -431,7 +431,9 @@ static void xgpio_irq_unmask(struct irq_data *irq_data) u32 old_enable = 
xgpio_get_value32(chip->enable, bit); u32 mask = BIT(bit / 32), val; - spin_lock_irqsave(&chip->gpio_lock, flags); + gpiochip_enable_irq(&chip->gc, irq_offset); + + raw_spin_lock_irqsave(&chip->gpio_lock, flags); __set_bit(bit, chip->enable); @@ -450,7 +452,7 @@ static void xgpio_irq_unmask(struct irq_data *irq_data) xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, val); } - spin_unlock_irqrestore(&chip->gpio_lock, flags); + raw_spin_unlock_irqrestore(&chip->gpio_lock, flags); } /** @@ -515,7 +517,7 @@ static void xgpio_irqhandler(struct irq_desc *desc) chained_irq_enter(irqchip, desc); - spin_lock(&chip->gpio_lock); + raw_spin_lock(&chip->gpio_lock); xgpio_read_ch_all(chip, XGPIO_DATA_OFFSET, all); @@ -532,7 +534,7 @@ static void xgpio_irqhandler(struct irq_desc *desc) bitmap_copy(chip->last_irq_read, all, 64); bitmap_or(all, rising, falling, 64); - spin_unlock(&chip->gpio_lock); + raw_spin_unlock(&chip->gpio_lock); dev_dbg(gc->parent, "IRQ rising %*pb falling %*pb\n", 64, rising, 64, falling); @@ -544,6 +546,16 @@ static void xgpio_irqhandler(struct irq_desc *desc) chained_irq_exit(irqchip, desc); } +static const struct irq_chip xgpio_irq_chip = { + .name = "gpio-xilinx", + .irq_ack = xgpio_irq_ack, + .irq_mask = xgpio_irq_mask, + .irq_unmask = xgpio_irq_unmask, + .irq_set_type = xgpio_set_irq_type, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + /** * xgpio_probe - Probe method for the GPIO device. * @pdev: pointer to the platform device @@ -623,7 +635,7 @@ static int xgpio_probe(struct platform_device *pdev) bitmap_set(chip->hw_map, 0, width[0]); bitmap_set(chip->hw_map, 32, width[1]); - spin_lock_init(&chip->gpio_lock); + raw_spin_lock_init(&chip->gpio_lock); chip->gc.base = -1; chip->gc.ngpio = bitmap_weight(chip->hw_map, 64); @@ -664,12 +676,6 @@ static int xgpio_probe(struct platform_device *pdev) if (chip->irq <= 0) goto skip_irq; - chip->irqchip.name = "gpio-xilinx"; - chip->irqchip.irq_ack = xgpio_irq_ack; - chip->irqchip.irq_mask = xgpio_irq_mask; - chip->irqchip.irq_unmask = xgpio_irq_unmask; - chip->irqchip.irq_set_type = xgpio_set_irq_type; - /* Disable per-channel interrupts */ xgpio_writereg(chip->regs + XGPIO_IPIER_OFFSET, 0); /* Clear any existing per-channel interrupts */ @@ -679,7 +685,7 @@ static int xgpio_probe(struct platform_device *pdev) xgpio_writereg(chip->regs + XGPIO_GIER_OFFSET, XGPIO_GIER_IE); girq = &chip->gc.irq; - girq->chip = &chip->irqchip; + gpio_irq_chip_set_chip(girq, &xgpio_irq_chip); girq->parent_handler = xgpio_irqhandler; girq->num_parents = 1; girq->parents = devm_kcalloc(&pdev->dev, 1, diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 06c6401f02b8..1d8168153a1c 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c @@ -1012,6 +1012,7 @@ static int zynq_gpio_remove(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) dev_warn(&pdev->dev, "pm_runtime_get_sync() Failed\n"); + device_init_wakeup(&pdev->dev, 0); gpiochip_remove(&gpio->chip); clk_disable_unprepare(gpio->clk); device_set_wakeup_capable(&pdev->dev, 0); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 33d73687e463..baa77a8e8365 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -1640,6 +1640,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = { .ignore_wake = "PNP0C50:00@8", }, }, + { + /* + * Spurious wakeups from GPIO 11 + * Found in BIOS 1.04 + * https://gitlab.freedesktop.org/drm/amd/-/issues/3954 + */ + .matches = { + 
DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"), + }, + .driver_data = &(struct acpi_gpiolib_dmi_quirk) { + .ignore_interrupt = "AMDI0030:00@11", + }, + }, {} /* Terminating entry */ }; diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 55f640ef3fee..897d20996a8c 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -2860,9 +2860,9 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file) struct gpio_chardev_data *cdev = file->private_data; struct gpio_device *gdev = cdev->gdev; - bitmap_free(cdev->watched_lines); blocking_notifier_chain_unregister(&gdev->notifier, &cdev->lineinfo_changed_nb); + bitmap_free(cdev->watched_lines); put_device(&gdev->dev); kfree(cdev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index dd22d2559720..af86402c70a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -781,6 +781,7 @@ struct amdgpu_device { bool need_swiotlb; bool accel_working; struct notifier_block acpi_nb; + struct notifier_block pm_nb; struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; struct debugfs_blob_wrapper debugfs_vbios_blob; struct debugfs_blob_wrapper debugfs_discovery_blob; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 7ad7ebaaa93c..5fa7f6d8aa30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -758,6 +758,7 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, return -EIO; } + kfree(info); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index a8dd63f270c3..3cca3f07f34d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -419,7 +419,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user ssize_t result = 0; int r; - if (size > 4096 || size & 0x3 || *pos & 0x3) + if (size & 0x3 || *pos & 0x3) return -EINVAL; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 1d0f6628f1d6..079cf3292f63 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -139,6 +139,10 @@ const char *amdgpu_asic_name[] = { "LAST", }; +static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev); +static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode, + void *data); + /** * DOC: pcie_replay_count * @@ -1220,6 +1224,17 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return 0; + /* resizing on Dell G5 SE platforms causes problems with runtime pm */ + if ((amdgpu_runtime_pm != 0) && + adev->pdev->vendor == PCI_VENDOR_ID_ATI && + adev->pdev->device == 0x731f && + adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) + return 0; + + /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */ + if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR)) + DRM_WARN("System can't access extended configuration space,please check!!\n"); + /* skip if the bios has already enabled large BAR */ if (adev->gmc.real_vram_size && (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size)) @@ -3979,6 +3994,11 @@ fence_driver_init: amdgpu_device_check_iommu_direct_map(adev); + adev->pm_nb.notifier_call = 
amdgpu_device_pm_notifier; + r = register_pm_notifier(&adev->pm_nb); + if (r) + goto failed; + return 0; release_ras_con: @@ -4040,6 +4060,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) flush_delayed_work(&adev->delayed_init_work); adev->shutdown = true; + unregister_pm_notifier(&adev->pm_nb); + /* make sure IB test finished before entering exclusive mode * to avoid preemption on IB test * */ @@ -4093,8 +4115,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) int idx; bool px; - amdgpu_fence_driver_sw_fini(adev); amdgpu_device_ip_fini(adev); + amdgpu_fence_driver_sw_fini(adev); release_firmware(adev->firmware.gpu_info_fw); adev->firmware.gpu_info_fw = NULL; adev->accel_working = false; @@ -4170,6 +4192,33 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev) /* * Suspend & resume. */ +/** + * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events + * @nb: notifier block + * @mode: suspend mode + * @data: data + * + * This function is called when the system is about to suspend or hibernate. + * It is used to set the appropriate flags so that eviction can be optimized + * in the pm prepare callback. + */ +static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode, + void *data) +{ + struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb); + + switch (mode) { + case PM_HIBERNATION_PREPARE: + adev->in_s4 = true; + break; + case PM_POST_HIBERNATION: + adev->in_s4 = false; + break; + } + + return NOTIFY_DONE; +} + /** * amdgpu_device_prepare - prepare for device suspend * @@ -4620,6 +4669,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, retry: amdgpu_amdkfd_pre_reset(adev); + amdgpu_device_stop_pending_resets(adev); + if (from_hypervisor) r = amdgpu_virt_request_full_gpu(adev, true); else @@ -5491,11 +5542,12 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ tmp_adev->asic_reset_res = r; } - /* - * Drop all pending non scheduler resets. Scheduler resets - * were already dropped during drm_sched_stop - */ - amdgpu_device_stop_pending_resets(tmp_adev); + if (!amdgpu_sriov_vf(tmp_adev)) + /* + * Drop all pending non scheduler resets. Scheduler resets + * were already dropped during drm_sched_stop + */ + amdgpu_device_stop_pending_resets(tmp_adev); } tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter)); @@ -6013,6 +6065,9 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) struct amdgpu_device *adev = drm_to_adev(dev); int r; + if (amdgpu_sriov_vf(adev)) + return false; + r = pci_save_state(pdev); if (!r) { kfree(adev->pci_state); @@ -6172,6 +6227,7 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, { struct dma_fence *old = NULL; + dma_fence_get(gang); do { dma_fence_put(old); rcu_read_lock(); @@ -6181,12 +6237,19 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, if (old == gang) break; - if (!dma_fence_is_signaled(old)) + if (!dma_fence_is_signaled(old)) { + dma_fence_put(gang); return old; + } } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit, old, gang) != old); + /* + * Drop it once for the exchanged reference in adev and once for the + * thread local reference acquired in amdgpu_device_get_gang(). 
+ */ + dma_fence_put(old); dma_fence_put(old); return NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index e8b3e9520cf6..4dcc7de961d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -42,6 +42,29 @@ #include #include +static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops; + +/** + * dma_buf_attach_adev - Helper to get adev of an attachment + * + * @attach: attachment + * + * Returns: + * A struct amdgpu_device * if the attaching device is an amdgpu device or + * partition, NULL otherwise. + */ +static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach) +{ + if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) { + struct drm_gem_object *obj = attach->importer_priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + return amdgpu_ttm_adev(bo->tbo.bdev); + } + + return NULL; +} + /** * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation * @@ -53,12 +76,14 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) { + struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach); struct drm_gem_object *obj = dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); int r; - if (pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) + if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) && + pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0) attach->peer2peer = false; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); @@ -210,7 +235,7 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach, struct sg_table *sgt, enum dma_data_direction dir) { - if (sgt->sgl->page_link) { + if (sg_page(sgt->sgl)) { dma_unmap_sgtable(attach->dev, sgt, dir, 0); sg_free_table(sgt); kfree(sgt); @@ -479,6 +504,9 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, struct drm_gem_object *obj = &bo->tbo.base; struct drm_gem_object *gobj; + if (!adev) + return false; + if (obj->import_attach) { struct dma_buf *dma_buf = obj->import_attach->dmabuf; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 238c15c0c7e1..1fbb73b2691d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1662,7 +1662,6 @@ static const u16 amdgpu_unsupported_pciidlist[] = { }; static const struct pci_device_id pciidlist[] = { -#ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, @@ -1735,8 +1734,6 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, {0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, {0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY}, -#endif -#ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU}, @@ -1819,7 +1816,6 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 
CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, -#endif /* topaz */ {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, @@ -2099,14 +2095,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENOTSUPP; } + switch (flags & AMD_ASIC_MASK) { + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + case CHIP_HAINAN: #ifdef CONFIG_DRM_AMDGPU_SI - if (!amdgpu_si_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_TAHITI: - case CHIP_PITCAIRN: - case CHIP_VERDE: - case CHIP_OLAND: - case CHIP_HAINAN: + if (!amdgpu_si_support) { dev_info(&pdev->dev, "SI support provided by radeon.\n"); dev_info(&pdev->dev, @@ -2114,16 +2110,18 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, ); return -ENODEV; } - } + break; +#else + dev_info(&pdev->dev, "amdgpu is built without SI support.\n"); + return -ENODEV; #endif + case CHIP_KAVERI: + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KABINI: + case CHIP_MULLINS: #ifdef CONFIG_DRM_AMDGPU_CIK - if (!amdgpu_cik_support) { - switch (flags & AMD_ASIC_MASK) { - case CHIP_KAVERI: - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_KABINI: - case CHIP_MULLINS: + if (!amdgpu_cik_support) { dev_info(&pdev->dev, "CIK support provided by radeon.\n"); dev_info(&pdev->dev, @@ -2131,8 +2129,14 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, ); return -ENODEV; } - } + break; +#else + dev_info(&pdev->dev, "amdgpu is built without CIK support.\n"); + return -ENODEV; #endif + default: + break; + } adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev); if (IS_ERR(adev)) @@ -2464,9 +2468,7 @@ static int amdgpu_pmops_freeze(struct device *dev) struct amdgpu_device *adev = drm_to_adev(drm_dev); int r; - adev->in_s4 = true; r = amdgpu_device_suspend(drm_dev, true); - adev->in_s4 = false; if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index f34bc9bb7045..7f1e92110dd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -150,7 +150,6 @@ void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, void amdgpu_job_free_resources(struct amdgpu_job *job) { - struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); struct dma_fence *f; unsigned i; @@ -163,7 +162,7 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) f = NULL; for (i = 0; i < job->num_ibs; ++i) - amdgpu_ib_free(ring->adev, &job->ibs[i], f); + amdgpu_ib_free(NULL, &job->ibs[i], f); } static void amdgpu_job_free_cb(struct drm_sched_job *s_job) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index b86c0b8252a5..031b6d974f26 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -132,13 +132,25 @@ static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = { */ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) { + int r; + if (bo->kfd_bo) - return mmu_interval_notifier_insert(&bo->notifier, current->mm, + r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr, amdgpu_bo_size(bo), &amdgpu_mn_hsa_ops); - return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr, - amdgpu_bo_size(bo), - &amdgpu_mn_gfx_ops); + else + r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr, + amdgpu_bo_size(bo), + &amdgpu_mn_gfx_ops); + if (r) + /* + * Make sure amdgpu_hmm_unregister() doesn't call + * mmu_interval_notifier_remove() 
when the notifier isn't properly + * initialized. + */ + bo->notifier.mm = NULL; + + return r; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index f8740ad08af4..ae6643c8ade6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -43,7 +43,7 @@ #include "amdgpu_securedisplay.h" #include "amdgpu_atomfirmware.h" -#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*3) +#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16) static int psp_sysfs_init(struct amdgpu_device *adev); static void psp_sysfs_fini(struct amdgpu_device *adev); @@ -484,7 +484,6 @@ static int psp_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct psp_context *psp = &adev->psp; - struct psp_gfx_cmd_resp *cmd = psp->cmd; psp_memory_training_fini(psp); if (psp->sos_fw) { @@ -511,8 +510,8 @@ static int psp_sw_fini(void *handle) adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) psp_sysfs_fini(adev); - kfree(cmd); - cmd = NULL; + kfree(psp->cmd); + psp->cmd = NULL; psp_free_shared_bufs(psp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7afefaa37427..725afb497805 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -776,7 +776,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, /* Map SG to device */ r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); if (r) - goto release_sg; + goto release_sg_table; /* convert SG to linear array of pages and dma addresses */ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, @@ -784,6 +784,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, return 0; +release_sg_table: + sg_free_table(ttm->sg); release_sg: kfree(ttm->sg); ttm->sg = NULL; @@ -1730,6 +1732,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) mutex_init(&adev->mman.gtt_window_lock); + dma_set_max_seg_size(adev->dev, UINT_MAX); /* No others user of address space so set it to 0 */ r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev, adev_to_drm(adev)->anon_inode->i_mapping, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index bc030588cd22..251416ad4652 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -220,15 +220,15 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) drm_sched_entity_destroy(&adev->vce.entity); - amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, - (void **)&adev->vce.cpu_addr); - for (i = 0; i < adev->vce.num_rings; i++) amdgpu_ring_fini(&adev->vce.ring[i]); release_firmware(adev->vce.fw); mutex_destroy(&adev->vce.idle_mutex); + amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, + (void **)&adev->vce.cpu_addr); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index d7b76a3d2d55..6174bef0ecb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -32,6 +32,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" +#include "amdgpu_reset.h" #include "vi.h" #include "soc15.h" #include "nv.h" @@ -456,7 +457,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) return -EINVAL; if (pf2vf_info->size > 1024) { - DRM_ERROR("invalid pf2vf message size\n"); + dev_err(adev->dev, "invalid pf2vf message size: 0x%x\n", pf2vf_info->size); return -EINVAL; } @@ -467,7 +468,9 @@ static int 
amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size, adev->virt.fw_reserve.checksum_key, checksum); if (checksum != checkval) { - DRM_ERROR("invalid pf2vf message\n"); + dev_err(adev->dev, + "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n", + checksum, checkval); return -EINVAL; } @@ -481,7 +484,9 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size, 0, checksum); if (checksum != checkval) { - DRM_ERROR("invalid pf2vf message\n"); + dev_err(adev->dev, + "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n", + checksum, checkval); return -EINVAL; } @@ -517,7 +522,7 @@ static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev) ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid; break; default: - DRM_ERROR("invalid pf2vf version\n"); + dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version); return -EINVAL; } @@ -617,8 +622,21 @@ static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work) int ret; ret = amdgpu_virt_read_pf2vf_data(adev); - if (ret) + if (ret) { + adev->virt.vf2pf_update_retry_cnt++; + if ((adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) && + amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev)) { + if (amdgpu_reset_domain_schedule(adev->reset_domain, + &adev->virt.flr_work)) + return; + else + dev_err(adev->dev, "Failed to queue work! at %s", __func__); + } + goto out; + } + + adev->virt.vf2pf_update_retry_cnt = 0; amdgpu_virt_write_vf2pf_data(adev); out: @@ -639,6 +657,7 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) adev->virt.fw_reserve.p_pf2vf = NULL; adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; + adev->virt.vf2pf_update_retry_cnt = 0; if (adev->mman.fw_vram_usage_va != NULL) { /* go through this logic in ip_init and reset to init workqueue*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index dc6aaa4d67be..fc2859726f0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -51,6 +51,8 @@ /* tonga/fiji use this offset */ #define mmBIF_IOV_FUNC_IDENTIFIER 0x1503 +#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 30 + enum amdgpu_sriov_vf_mode { SRIOV_VF_MODE_BARE_METAL = 0, SRIOV_VF_MODE_ONE_VF, @@ -250,6 +252,7 @@ struct amdgpu_virt { /* vf2pf message */ struct delayed_work vf2pf_work; uint32_t vf2pf_update_interval_ms; + int vf2pf_update_retry_cnt; /* multimedia bandwidth config */ bool is_mm_bw_enabled; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 49a47807c42d..16be61abd998 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1060,10 +1060,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, * next command submission. 
*/ if (bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv) { - uint32_t mem_type = bo->tbo.resource->mem_type; - - if (!(bo->preferred_domains & - amdgpu_mem_type_to_domain(mem_type))) + if (bo->tbo.resource && + !(bo->preferred_domains & + amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))) amdgpu_vm_bo_evicted(&bo_va->base); else amdgpu_vm_bo_idle(&bo_va->base); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index c76895cca4d9..61e869839641 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1301,7 +1301,7 @@ static int gfx_v11_0_sw_init(void *handle) adev->gfx.me.num_me = 1; adev->gfx.me.num_pipe_per_me = 1; adev->gfx.me.num_queue_per_pipe = 1; - adev->gfx.mec.num_mec = 2; + adev->gfx.mec.num_mec = 1; adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 4; break; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index ec4d5e15b766..de74686cb1db 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -92,12 +92,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) { uint64_t value; - /* Program the AGP BAR */ - WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0); - WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); - WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); - if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) { + /* Program the AGP BAR */ + WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BASE, 0); + WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); + WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); + /* Program the system aperture low logical page number. */ WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c index 29c3484ae1f1..6b9f2e1d9d69 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c @@ -31,13 +31,25 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - if (!ring || !ring->funcs->emit_wreg) + if (!ring || !ring->funcs->emit_wreg) { WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); - else + if (amdgpu_sriov_vf(adev)) { + /* this is fine because SR_IOV doesn't remap the register */ + RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2); + } else { + /* We just need to read back a register to post the write. + * Reading back the remapped register causes problems on + * some platforms so just read back the memory size register. 
+ */ + if (adev->nbio.funcs->get_memsize) + adev->nbio.funcs->get_memsize(adev); + } + } else { amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); + } } static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 12906ba74462..d39c6baae007 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -276,6 +276,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) timeout -= 10; } while (timeout > 1); + dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n"); + flr_done: atomic_set(&adev->reset_domain->in_gpu_reset, 0); up_write(&adev->reset_domain->sem); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index e07757eea7ad..a311a2425ec0 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -300,6 +300,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) timeout -= 10; } while (timeout > 1); + dev_warn(adev->dev, "waiting IDH_FLR_NOTIFICATION_CMPL timeout\n"); + flr_done: atomic_set(&adev->reset_domain->in_gpu_reset, 0); up_write(&adev->reset_domain->sem); diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c index def89379b51a..d23e7391c6f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c @@ -247,6 +247,12 @@ static void nbio_v7_7_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_SOC15(NBIO, 0, regBIF0_PCIE_MST_CTRL_3, data); + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(7, 7, 0): + data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4) & ~BIT(23); + WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF5_STRAP4, data); + break; + } } static void nbio_v7_7_update_medium_grain_clock_gating(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 877989278290..0d4d6acae994 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -87,7 +87,7 @@ static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] = {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, }; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index c373a2a3248e..9c16fff40f2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -125,7 +125,7 @@ static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] = {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)}, - {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 8192, 8192, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)}, }; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c 
b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index e668b3baa8c6..1c5d79528ca7 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1284,7 +1284,7 @@ static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, struct amdgpu_job *job, struct amdgpu_ib *ib) { - struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); + struct amdgpu_ring *ring = amdgpu_job_ring(job); unsigned i; /* No patching necessary for the first instance */ diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c index f1ba76c35cd6..e85a0fb227d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c @@ -109,6 +109,33 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev, tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1); + if (enable) { + /* Unset the CLEAR_OVERFLOW bit to make sure the next step + * is switching the bit from 0 to 1 + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Clear RB_OVERFLOW bit */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1); + if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { + if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) + return -ETIMEDOUT; + } else { + WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp); + } + + /* Unset the CLEAR_OVERFLOW bit immediately so new overflows + * can be detected. + */ + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0); + } + /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0)); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index f83574107eb8..773913a7d6e9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -208,6 +208,11 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, return -EINVAL; } + if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) { + args->ring_size = KFD_MIN_QUEUE_RING_SIZE; + pr_debug("Size lower. clamped to KFD_MIN_QUEUE_RING_SIZE"); + } + if (!access_ok((const void __user *) args->read_pointer_address, sizeof(uint32_t))) { pr_err("Can't access read pointer\n"); @@ -464,6 +469,11 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, return -EINVAL; } + if (args->ring_size < KFD_MIN_QUEUE_RING_SIZE) { + args->ring_size = KFD_MIN_QUEUE_RING_SIZE; + pr_debug("Size lower. 
clamped to KFD_MIN_QUEUE_RING_SIZE"); + } + properties.queue_address = args->ring_base_address; properties.queue_size = args->ring_size; properties.queue_percent = args->queue_percentage; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 8a7705db0b9a..a7ed47cb5bf6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -324,7 +324,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, spage = migrate_pfn_to_page(migrate->src[i]); if (spage && !is_zone_device_page(spage)) { src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE, - DMA_TO_DEVICE); + DMA_BIDIRECTIONAL); r = dma_mapping_error(dev, src[i]); if (r) { dev_err(adev->dev, "%s: fail %d dma_map_page\n", @@ -623,7 +623,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, goto out_oom; } - dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE); + dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL); r = dma_mapping_error(dev, dst[i]); if (r) { dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 9582c9449fff..fd7fecaa9254 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -36,6 +36,7 @@ #include #include "amdgpu_amdkfd.h" #include "amdgpu.h" +#include "amdgpu_reset.h" struct mm_struct; @@ -314,8 +315,8 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, attr_sdma); struct kfd_sdma_activity_handler_workarea sdma_activity_work_handler; - INIT_WORK(&sdma_activity_work_handler.sdma_activity_work, - kfd_sdma_activity_worker); + INIT_WORK_ONSTACK(&sdma_activity_work_handler.sdma_activity_work, + kfd_sdma_activity_worker); sdma_activity_work_handler.pdd = pdd; sdma_activity_work_handler.sdma_activity_counter = 0; @@ -323,6 +324,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr, schedule_work(&sdma_activity_work_handler.sdma_activity_work); flush_work(&sdma_activity_work_handler.sdma_activity_work); + destroy_work_on_stack(&sdma_activity_work_handler.sdma_activity_work); return snprintf(buffer, PAGE_SIZE, "%llu\n", (sdma_activity_work_handler.sdma_activity_counter)/ @@ -811,6 +813,14 @@ struct kfd_process *kfd_create_process(struct file *filep) if (thread->group_leader->mm != thread->mm) return ERR_PTR(-EINVAL); + /* If the process just called exec(3), it is possible that the + * cleanup of the kfd_process (following the release of the mm + * of the old process image) is still in the cleanup work queue. + * Make sure to drain any job before trying to recreate any + * resource for this process. + */ + flush_workqueue(kfd_process_wq); + /* * take kfd processes mutex before starting of process creation * so there won't be a case where two threads of the same process @@ -823,14 +833,6 @@ struct kfd_process *kfd_create_process(struct file *filep) if (process) { pr_debug("Process already found\n"); } else { - /* If the process just called exec(3), it is possible that the - * cleanup of the kfd_process (following the release of the mm - * of the old process image) is still in the cleanup work queue. - * Make sure to drain any job before trying to recreate any - * resource for this process. 
- */ - flush_workqueue(kfd_process_wq); - process = create_process(thread); if (IS_ERR(process)) goto out; @@ -1113,6 +1115,17 @@ static void kfd_process_remove_sysfs(struct kfd_process *p) p->kobj = NULL; } +/* + * If any GPU is ongoing reset, wait for reset complete. + */ +static void kfd_process_wait_gpu_reset_complete(struct kfd_process *p) +{ + int i; + + for (i = 0; i < p->n_pdds; i++) + flush_workqueue(p->pdds[i]->dev->adev->reset_domain->wq); +} + /* No process locking is needed in this function, because the process * is not findable any more. We must assume that no other thread is * using it any more, otherwise we couldn't safely free the process @@ -1126,6 +1139,11 @@ static void kfd_process_wq_release(struct work_struct *work) kfd_process_dequeue_from_all_devices(p); pqm_uninit(&p->pqm); + /* + * If GPU in reset, user queues may still running, wait for reset complete. + */ + kfd_process_wait_gpu_reset_complete(p); + /* Signal the eviction fence after user mode queues are * destroyed. This allows any BOs to be freed without * triggering pointless evictions or waiting for fences. diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 1918a3c06ac8..a15bf1e38276 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -429,7 +429,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n", pqm->process->pasid, pqn->q->properties.queue_id, retval); - if (retval != -ETIME) + if (retval != -ETIME && retval != -EIO) goto err_destroy_queue; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8dc0f70df24f..64f626cc7913 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -230,6 +230,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); static void handle_hpd_rx_irq(void *param); +static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, + int bl_idx, + u32 user_brightness); + static bool is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state); @@ -2791,16 +2795,16 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, for (k = 0; k < dc_state->stream_count; k++) { bundle->stream_update.stream = dc_state->streams[k]; - for (m = 0; m < dc_state->stream_status->plane_count; m++) { + for (m = 0; m < dc_state->stream_status[k].plane_count; m++) { bundle->surface_updates[m].surface = - dc_state->stream_status->plane_states[m]; + dc_state->stream_status[k].plane_states[m]; bundle->surface_updates[m].surface->force_full_update = true; } update_planes_and_stream_adapter(dm->dc, UPDATE_TYPE_FULL, - dc_state->stream_status->plane_count, + dc_state->stream_status[k].plane_count, dc_state->streams[k], &bundle->stream_update, bundle->surface_updates); @@ -2885,6 +2889,12 @@ static int dm_resume(void *handle) mutex_unlock(&dm->dc_lock); + /* set the backlight after a reset */ + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i]) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + return 0; } /* Recreate dc_state - DC invalidates it when setting power state to S3. 
*/ @@ -4484,17 +4494,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } } + if (link_cnt > (MAX_PIPES * 2)) { + DRM_ERROR( + "KMS: Cannot support more than %d display indexes\n", + MAX_PIPES * 2); + goto fail; + } + /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { struct dc_link *link = NULL; - if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { - DRM_ERROR( - "KMS: Cannot support more than %d display indexes\n", - AMDGPU_DM_MAX_DISPLAY_INDEX); - continue; - } - aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); if (!aconnector) goto fail; @@ -7266,7 +7276,7 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, int i; int result = -EIO; - if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) + if (!ddc_service->ddc_pin) return result; cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); @@ -9343,6 +9353,9 @@ static bool should_reset_plane(struct drm_atomic_state *state, if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset) return true; + if (amdgpu_in_reset(adev) && state->allow_modeset) + return true; + /* Exit early if we know that we're adding or removing the plane. */ if (old_plane_state->crtc != new_plane_state->crtc) return true; @@ -10675,7 +10688,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( * Transient states before tunneling is enabled could * lead to this error. We can ignore this for now. */ - if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) { + if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) { DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n", payload->address, payload->length, p_notify->result); @@ -10684,22 +10697,15 @@ int amdgpu_dm_process_dmub_aux_transfer_sync( goto out; } + payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF; + if (adev->dm.dmub_notify->aux_reply.command & 0xF0) + /* The reply is stored in the top nibble of the command. 
*/ + payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF; - payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; - if (!payload->write && p_notify->aux_reply.length && - (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) { - - if (payload->length != p_notify->aux_reply.length) { - DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n", - p_notify->aux_reply.length, - payload->address, payload->length); - *operation_result = AUX_RET_ERROR_INVALID_REPLY; - goto out; - } - + /*write req may receive a byte indicating partially written number as well*/ + if (p_notify->aux_reply.length) memcpy(payload->data, p_notify->aux_reply.data, p_notify->aux_reply.length); - } /* success */ ret = p_notify->aux_reply.length; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 6202e31c7e3a..6110d88efdbb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -39,10 +39,10 @@ static bool lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) { - struct dc_link *link = handle; struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; - struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; return dm_helpers_submit_i2c(link->ctx, link, &cmd); } @@ -52,8 +52,10 @@ lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint3 { struct dc_link *link = handle; - struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} }; - struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, + {false, address, size, data} }; + struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; return dm_helpers_submit_i2c(link->ctx, link, &cmd); } @@ -76,7 +78,6 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size) { - struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { @@ -96,13 +97,12 @@ static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version; *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size; - return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf; } -static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version) +static int psp_set_srm(struct psp_context *psp, + u8 *srm, uint32_t srm_size, uint32_t *srm_version) { - struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.context.initialized) { @@ -119,7 +119,8 @@ static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); - if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || + hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX) return -EINVAL; @@ -150,7 +151,6 @@ static void process_output(struct hdcp_workqueue *hdcp_work) static void link_lock(struct hdcp_workqueue *work, bool 
lock) { - int i = 0; for (i = 0; i < work->max_link; i++) { @@ -160,72 +160,69 @@ static void link_lock(struct hdcp_workqueue *work, bool lock) mutex_unlock(&work[i].mutex); } } + void hdcp_update_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector, - uint8_t content_type, + u8 content_type, bool enable_encryption) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; - struct mod_hdcp_display *display = &hdcp_work[link_index].display; - struct mod_hdcp_link *link = &hdcp_work[link_index].link; - struct mod_hdcp_display_query query; + struct mod_hdcp_link_adjustment link_adjust; + struct mod_hdcp_display_adjustment display_adjust; + unsigned int conn_index = aconnector->base.index; - mutex_lock(&hdcp_w->mutex); - hdcp_w->aconnector = aconnector; + guard(mutex)(&hdcp_w->mutex); + drm_connector_get(&aconnector->base); + if (hdcp_w->aconnector[conn_index]) + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = aconnector; - query.display = NULL; - mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query); + memset(&link_adjust, 0, sizeof(link_adjust)); + memset(&display_adjust, 0, sizeof(display_adjust)); - if (query.display != NULL) { - memcpy(display, query.display, sizeof(struct mod_hdcp_display)); - mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); + if (enable_encryption) { + /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp + * (s3 resume case) + */ + if (hdcp_work->srm_size > 0) + psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, + hdcp_work->srm_size, + &hdcp_work->srm_version); - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; - if (enable_encryption) { - /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp - * (s3 resume case) - */ - if (hdcp_work->srm_size > 0) - psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size, - &hdcp_work->srm_version); + link_adjust.auth_delay = 2; - display->adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; - if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { - hdcp_w->link.adjust.hdcp1.disable = 0; - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; - } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { - hdcp_w->link.adjust.hdcp1.disable = 1; - hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; - } - - schedule_delayed_work(&hdcp_w->property_validate_dwork, - msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); - } else { - display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; - hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - cancel_delayed_work(&hdcp_w->property_validate_dwork); + if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { + link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { + link_adjust.hdcp1.disable = 1; + link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; } - display->state = MOD_HDCP_DISPLAY_ACTIVE; + schedule_delayed_work(&hdcp_w->property_validate_dwork, + msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + } else { + display_adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; + hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + cancel_delayed_work(&hdcp_w->property_validate_dwork); } - mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + 
mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output); process_output(hdcp_w); - mutex_unlock(&hdcp_w->mutex); } static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, - unsigned int link_index, + unsigned int link_index, struct amdgpu_dm_connector *aconnector) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct drm_connector_state *conn_state = aconnector->base.state; + unsigned int conn_index = aconnector->base.index; - mutex_lock(&hdcp_w->mutex); - hdcp_w->aconnector = aconnector; + guard(mutex)(&hdcp_w->mutex); /* the removal of display will invoke auth reset -> hdcp destroy and * we'd expect the Content Protection (CP) property changed back to @@ -236,28 +233,39 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n", - aconnector->base.index, conn_state->hdcp_content_type, aconnector->base.dpms); + aconnector->base.index, conn_state->hdcp_content_type, + aconnector->base.dpms); } mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); - + if (hdcp_w->aconnector[conn_index]) { + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = NULL; + } process_output(hdcp_w); - mutex_unlock(&hdcp_w->mutex); } + void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) { struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + unsigned int conn_index; - mutex_lock(&hdcp_w->mutex); + guard(mutex)(&hdcp_w->mutex); mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); cancel_delayed_work(&hdcp_w->property_validate_dwork); - hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { + hdcp_w->encryption_status[conn_index] = + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + if (hdcp_w->aconnector[conn_index]) { + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = NULL; + } + } process_output(hdcp_w); - - mutex_unlock(&hdcp_w->mutex); } void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index) @@ -267,17 +275,14 @@ void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index schedule_work(&hdcp_w->cpirq_work); } - - - static void event_callback(struct work_struct *work) { struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, - callback_dwork); + callback_dwork); - mutex_lock(&hdcp_work->mutex); + guard(mutex)(&hdcp_work->mutex); cancel_delayed_work(&hdcp_work->callback_dwork); @@ -285,54 +290,79 @@ static void event_callback(struct work_struct *work) &hdcp_work->output); process_output(hdcp_work); - - mutex_unlock(&hdcp_work->mutex); - - } + static void event_property_update(struct work_struct *work) { - - struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); - struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; - struct drm_device *dev = hdcp_work->aconnector->base.dev; + struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, + property_update_work); + struct amdgpu_dm_connector *aconnector = NULL; + struct drm_device *dev; long ret; + unsigned int conn_index; + struct drm_connector *connector; + struct drm_connector_state *conn_state; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - 
mutex_lock(&hdcp_work->mutex); + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { + aconnector = hdcp_work->aconnector[conn_index]; + if (!aconnector) + continue; - if (aconnector->base.state && aconnector->base.state->commit) { - ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ); + if (!aconnector->base.index) + continue; - if (ret == 0) { - DRM_ERROR("HDCP state unknown! Setting it to DESIRED"); - hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + connector = &aconnector->base; + + /* check if display connected */ + if (connector->status != connector_status_connected) + continue; + + conn_state = aconnector->base.state; + + if (!conn_state) + continue; + + dev = connector->dev; + + if (!dev) + continue; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + guard(mutex)(&hdcp_work->mutex); + + if (conn_state->commit) { + ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done, + 10 * HZ); + if (ret == 0) { + DRM_ERROR("HDCP state unknown! Setting it to DESIRED\n"); + hdcp_work->encryption_status[conn_index] = + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } } - } - - if (aconnector->base.state) { - if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { - if (aconnector->base.state->hdcp_content_type == + if (hdcp_work->encryption_status[conn_index] != + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { + if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE0 && - hdcp_work->encryption_status <= - MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) - drm_hdcp_update_content_protection(&aconnector->base, - DRM_MODE_CONTENT_PROTECTION_ENABLED); - else if (aconnector->base.state->hdcp_content_type == + hdcp_work->encryption_status[conn_index] <= + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) { + DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n"); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); + } else if (conn_state->hdcp_content_type == DRM_MODE_HDCP_CONTENT_TYPE1 && - hdcp_work->encryption_status == - MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) - drm_hdcp_update_content_protection(&aconnector->base, - DRM_MODE_CONTENT_PROTECTION_ENABLED); + hdcp_work->encryption_status[conn_index] == + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) { + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); + } } else { - drm_hdcp_update_content_protection(&aconnector->base, - DRM_MODE_CONTENT_PROTECTION_DESIRED); + DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n"); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED); } + drm_modeset_unlock(&dev->mode_config.connection_mutex); } - - mutex_unlock(&hdcp_work->mutex); - drm_modeset_unlock(&dev->mode_config.connection_mutex); } static void event_property_validate(struct work_struct *work) @@ -340,22 +370,53 @@ static void event_property_validate(struct work_struct *work) struct hdcp_workqueue *hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork); struct mod_hdcp_display_query query; - struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + struct amdgpu_dm_connector *aconnector; + unsigned int conn_index; - if (!aconnector) - return; + guard(mutex)(&hdcp_work->mutex); - mutex_lock(&hdcp_work->mutex); + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; + conn_index++) { + aconnector = hdcp_work->aconnector[conn_index]; - 
query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; - mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query); - if (query.encryption_status != hdcp_work->encryption_status) { - hdcp_work->encryption_status = query.encryption_status; - schedule_work(&hdcp_work->property_update_work); + if (!aconnector) + continue; + + if (!aconnector->base.index) + continue; + + /* check if display connected */ + if (aconnector->base.status != connector_status_connected) + continue; + + if (!aconnector->base.state) + continue; + + query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, + &query); + + DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n", + aconnector->base.index, + aconnector->base.state->content_protection, + query.encryption_status, + hdcp_work->encryption_status[conn_index]); + + if (query.encryption_status != + hdcp_work->encryption_status[conn_index]) { + DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n", + hdcp_work->encryption_status[conn_index], + query.encryption_status); + + hdcp_work->encryption_status[conn_index] = + query.encryption_status; + + DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n"); + + schedule_work(&hdcp_work->property_update_work); + } } - - mutex_unlock(&hdcp_work->mutex); } static void event_watchdog_timer(struct work_struct *work) @@ -363,10 +424,10 @@ static void event_watchdog_timer(struct work_struct *work) struct hdcp_workqueue *hdcp_work; hdcp_work = container_of(to_delayed_work(work), - struct hdcp_workqueue, + struct hdcp_workqueue, watchdog_timer_dwork); - mutex_lock(&hdcp_work->mutex); + guard(mutex)(&hdcp_work->mutex); cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); @@ -375,9 +436,6 @@ static void event_watchdog_timer(struct work_struct *work) &hdcp_work->output); process_output(hdcp_work); - - mutex_unlock(&hdcp_work->mutex); - } static void event_cpirq(struct work_struct *work) @@ -386,17 +444,13 @@ static void event_cpirq(struct work_struct *work) hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work); - mutex_lock(&hdcp_work->mutex); + guard(mutex)(&hdcp_work->mutex); mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output); process_output(hdcp_work); - - mutex_unlock(&hdcp_work->mutex); - } - void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) { int i = 0; @@ -404,6 +458,7 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) for (i = 0; i < hdcp_work->max_link; i++) { cancel_delayed_work_sync(&hdcp_work[i].callback_dwork); cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); + cancel_delayed_work_sync(&hdcp_work[i].property_validate_dwork); } sysfs_remove_bin_file(kobj, &hdcp_work[0].attr); @@ -412,10 +467,8 @@ void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) kfree(hdcp_work); } - static bool enable_assr(void *handle, struct dc_link *link) { - struct hdcp_workqueue *hdcp_work = handle; struct mod_hdcp hdcp = hdcp_work->hdcp; struct psp_context *psp = hdcp.config.psp.handle; @@ -429,11 +482,12 @@ static bool enable_assr(void *handle, struct dc_link *link) dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf; - mutex_lock(&psp->dtm_context.mutex); + guard(mutex)(&psp->dtm_context.mutex); memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE; - 
dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = link->link_enc_hw_inst; + dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = + link->link_enc_hw_inst; dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; psp_dtm_invoke(psp, dtm_cmd->cmd_id); @@ -443,8 +497,6 @@ static bool enable_assr(void *handle, struct dc_link *link) res = false; } - mutex_unlock(&psp->dtm_context.mutex); - return res; } @@ -453,9 +505,10 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) struct hdcp_workqueue *hdcp_work = handle; struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; int link_index = aconnector->dc_link->link_index; + unsigned int conn_index = aconnector->base.index; struct mod_hdcp_display *display = &hdcp_work[link_index].display; struct mod_hdcp_link *link = &hdcp_work[link_index].link; - struct drm_connector_state *conn_state; + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; struct dc_sink *sink = NULL; bool link_is_hdcp14 = false; @@ -475,7 +528,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) else if (aconnector->dc_em_sink) sink = aconnector->dc_em_sink; - if (sink != NULL) + if (sink) link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal); display->controller = CONTROLLER_ID_D0 + config->otg_inst; @@ -497,19 +550,28 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; link->adjust.auth_delay = 3; link->adjust.hdcp1.disable = 0; - conn_state = aconnector->base.state; + hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index, - (!!aconnector->base.state) ? aconnector->base.state->content_protection : -1, - (!!aconnector->base.state) ? aconnector->base.state->hdcp_content_type : -1); + (!!aconnector->base.state) ? + aconnector->base.state->content_protection : -1, + (!!aconnector->base.state) ? + aconnector->base.state->hdcp_content_type : -1); - if (conn_state) - hdcp_update_display(hdcp_work, link_index, aconnector, - conn_state->hdcp_content_type, false); + guard(mutex)(&hdcp_w->mutex); + + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + drm_connector_get(&aconnector->base); + if (hdcp_w->aconnector[conn_index]) + drm_connector_put(&hdcp_w->aconnector[conn_index]->base); + hdcp_w->aconnector[conn_index] = aconnector; + process_output(hdcp_w); } - -/* NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel +/** + * DOC: Add sysfs interface for set/get srm + * + * NOTE: From the usermodes prospective you only need to call write *ONCE*, the kernel * will automatically call once or twice depending on the size * * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is @@ -520,23 +582,23 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) * sysfs interface doesn't tell us the size we will get so we are sending partial SRMs to psp and on * the last call we will send the full SRM. PSP will fail on every call before the last. * - * This means we don't know if the SRM is good until the last call. And because of this limitation we - * cannot throw errors early as it will stop the kernel from writing to sysfs + * This means we don't know if the SRM is good until the last call. 
And because of this + * limitation we cannot throw errors early as it will stop the kernel from writing to sysfs * * Example 1: - * Good SRM size = 5096 - * first call to write 4096 -> PSP fails - * Second call to write 1000 -> PSP Pass -> SRM is set + * Good SRM size = 5096 + * first call to write 4096 -> PSP fails + * Second call to write 1000 -> PSP Pass -> SRM is set * * Example 2: - * Bad SRM size = 4096 - * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this - * is the last call) + * Bad SRM size = 4096 + * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this + * is the last call) * * Solution?: - * 1: Parse the SRM? -> It is signed so we don't know the EOF - * 2: We can have another sysfs that passes the size before calling set. -> simpler solution - * below + * 1: Parse the SRM? -> It is signed so we don't know the EOF + * 2: We can have another sysfs that passes the size before calling set. -> simpler solution + * below * * Easy Solution: * Always call get after Set to verify if set was successful. @@ -545,20 +607,21 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) * +----------------------+ * PSP will only update its srm if its older than the one we are trying to load. * Always do set first than get. - * -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer - * version and save it + * -if we try to "1. SET" a older version PSP will reject it and we can "2. GET" the newer + * version and save it * - * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the - * same(newer) version back and save it + * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the + * same(newer) version back and save it * - * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is - * incorrect/corrupted and we should correct our SRM by getting it from PSP + * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is + * incorrect/corrupted and we should correct our SRM by getting it from PSP */ -static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, +static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; - uint32_t srm_version = 0; + u32 srm_version = 0; work = container_of(bin_attr, struct hdcp_workqueue, attr); link_lock(work, true); @@ -572,19 +635,19 @@ static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bi work->srm_version = srm_version; } - link_lock(work, false); return count; } -static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer, +static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buffer, loff_t pos, size_t count) { struct hdcp_workqueue *work; - uint8_t *srm = NULL; - uint32_t srm_version; - uint32_t srm_size; + u8 *srm = NULL; + u32 srm_version; + u32 srm_size; size_t ret = count; work = container_of(bin_attr, struct hdcp_workqueue, attr); @@ -617,12 +680,12 @@ ret: /* From the hdcp spec (5.Renewability) SRM needs to be stored in a non-volatile memory. 
* * For example, - * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" - * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent - * across boot/reboots/suspend/resume/shutdown + * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" + * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent + * across boot/reboots/suspend/resume/shutdown * - * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need - * to make the SRM persistent. + * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP + * we need to make the SRM persistent. * * -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory. * -The kernel cannot write to the file systems. @@ -632,8 +695,8 @@ ret: * * Usermode can read/write to/from PSP using the sysfs interface * For example: - * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile - * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm + * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile + * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm */ static const struct bin_attribute data_attr = { .attr = {.name = "hdcp_srm", .mode = 0664}, @@ -642,10 +705,9 @@ static const struct bin_attribute data_attr = { .read = srm_data_read, }; - -struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc) +struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, + struct cp_psp *cp_psp, struct dc *dc) { - int max_caps = dc->caps.max_links; struct hdcp_workqueue *hdcp_work; int i = 0; @@ -654,14 +716,16 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct if (ZERO_OR_NULL_PTR(hdcp_work)) return NULL; - hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL); + hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm), GFP_KERNEL); - if (hdcp_work->srm == NULL) + if (!hdcp_work->srm) goto fail_alloc_context; - hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL); + hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm_temp), GFP_KERNEL); - if (hdcp_work->srm_temp == NULL) + if (!hdcp_work->srm_temp) goto fail_alloc_context; hdcp_work->max_link = max_caps; @@ -686,6 +750,13 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + + memset(hdcp_work[i].aconnector, 0, + sizeof(struct amdgpu_dm_connector *) * + AMDGPU_DM_MAX_DISPLAY_INDEX); + memset(hdcp_work[i].encryption_status, 0, + sizeof(enum mod_hdcp_encryption_status) * + AMDGPU_DM_MAX_DISPLAY_INDEX); } cp_psp->funcs.update_stream_config = update_config; @@ -707,10 +778,5 @@ fail_alloc_context: kfree(hdcp_work); return NULL; - - - } - - diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index bbbf7d0eff82..69b445b011c8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -43,7 +43,7 @@ struct hdcp_workqueue { struct delayed_work callback_dwork; struct delayed_work watchdog_timer_dwork; struct delayed_work property_validate_dwork; - struct amdgpu_dm_connector *aconnector; + struct amdgpu_dm_connector *aconnector[AMDGPU_DM_MAX_DISPLAY_INDEX]; struct mutex mutex; struct mod_hdcp hdcp; @@ -51,8 +51,7 @@ struct hdcp_workqueue { struct mod_hdcp_display display; struct mod_hdcp_link link; - enum mod_hdcp_encryption_status encryption_status; - + enum mod_hdcp_encryption_status encryption_status[AMDGPU_DM_MAX_DISPLAY_INDEX]; /* when display is unplugged from mst hub, connctor will be * destroyed within dm_dp_mst_connector_destroy. connector * hdcp perperties, like type, undesired, desired, enabled, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 19f543ba7205..56e24b050eec 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -885,6 +885,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; + int i; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { @@ -906,6 +907,12 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) } } drm_connector_list_iter_end(&iter); + + /* Update reference counts for HPDs */ + for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) { + if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1)) + drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i); + } } /** @@ -921,6 +928,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) struct drm_device *dev = adev_to_drm(adev); struct drm_connector *connector; struct drm_connector_list_iter iter; + int i; drm_connector_list_iter_begin(dev, &iter); drm_for_each_connector_iter(connector, &iter) { @@ -941,4 +949,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) } } drm_connector_list_iter_end(&iter); + + /* Update reference counts for HPDs */ + for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) { + if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1)) + drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i); + } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 1acef5f3838f..495491decec1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -55,6 +55,9 @@ bool is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream); +/* + * This function handles both native AUX and I2C-Over-AUX transactions. 
+ */ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) { @@ -63,6 +66,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, enum aux_return_code_type operation_result; struct amdgpu_device *adev; struct ddc_service *ddc; + uint8_t copy[16]; if (WARN_ON(msg->size > 16)) return -E2BIG; @@ -78,6 +82,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0; payload.defer_delay = 0; + if (payload.write) { + memcpy(copy, msg->buffer, msg->size); + payload.data = copy; + } + result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload, &operation_result); @@ -91,15 +100,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, if (adev->dm.aux_hpd_discon_quirk) { if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && operation_result == AUX_RET_ERROR_HPD_DISCON) { - result = 0; + result = msg->size; operation_result = AUX_RET_SUCCESS; } } - if (payload.write && result >= 0) - result = msg->size; + /* + * result equals to 0 includes the cases of AUX_DEFER/I2C_DEFER + */ + if (payload.write && result >= 0) { + if (result) { + /*one byte indicating partially written bytes*/ + drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n"); + result = payload.data[0]; + } else if (!payload.reply[0]) + /*I2C_ACK|AUX_ACK*/ + result = msg->size; + } - if (result < 0) + if (result < 0) { switch (operation_result) { case AUX_RET_SUCCESS: break; @@ -118,6 +137,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, break; } + drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); + } + + if (payload.reply[0]) + drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", + payload.reply[0]); + return result; } @@ -1555,16 +1581,16 @@ clean_exit: return ret; } -static unsigned int kbps_from_pbn(unsigned int pbn) +static uint32_t kbps_from_pbn(unsigned int pbn) { - unsigned int kbps = pbn; + uint64_t kbps = (uint64_t)pbn; kbps *= (1000000 / PEAK_FACTOR_X1000); kbps *= 8; kbps *= 54; kbps /= 64; - return kbps; + return (uint32_t)kbps; } static bool is_dsc_common_config_possible(struct dc_stream_state *stream, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index 3eb8794807d2..cb4b17c3b694 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -51,7 +51,8 @@ static bool link_supports_psrsu(struct dc_link *link) !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap) return false; - return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub); + /* Temporarily disable PSR-SU to avoid glitches */ + return false; } /* diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 9b8ea6e9a2b9..0f686e363d30 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -664,6 +664,9 @@ static enum bp_result get_ss_info_v3_1( ss_table_header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3, DATA_TABLES(ASIC_InternalSS_Info)); + if (!ss_table_header_include) + return BP_RESULT_UNSUPPORTED; + table_size = (le16_to_cpu(ss_table_header_include->sHeader.usStructureSize) - sizeof(ATOM_COMMON_TABLE_HEADER)) @@ -1031,6 +1034,8 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1( header = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2, DATA_TABLES(ASIC_InternalSS_Info)); + if (!header) 
+ return result; memset(info, 0, sizeof(struct spread_spectrum_info)); @@ -1104,6 +1109,8 @@ static enum bp_result get_ss_info_from_ss_info_table( get_atom_data_table_revision(header, &revision); tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info)); + if (!tbl) + return result; if (1 != revision.major || 2 > revision.minor) return result; @@ -1631,6 +1638,8 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl( tbl = GET_IMAGE(ATOM_SPREAD_SPECTRUM_INFO, DATA_TABLES(SS_Info)); + if (!tbl) + return number; if (1 != revision.major || 2 > revision.minor) return number; @@ -1711,6 +1720,8 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1( header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V2, DATA_TABLES(ASIC_InternalSS_Info)); + if (!header_include) + return 0; size = (le16_to_cpu(header_include->sHeader.usStructureSize) - sizeof(ATOM_COMMON_TABLE_HEADER)) @@ -1748,6 +1759,9 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_V3_1( header_include = GET_IMAGE(ATOM_ASIC_INTERNAL_SS_INFO_V3, DATA_TABLES(ASIC_InternalSS_Info)); + if (!header_include) + return number; + size = (le16_to_cpu(header_include->sHeader.usStructureSize) - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 4d2590964a20..75e44d8a7b40 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1862,19 +1862,21 @@ static enum bp_result get_firmware_info_v3_2( /* Vega12 */ smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2, DATA_TABLES(smu_info)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage); if (!smu_info_v3_2) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_2->gpuclk_ss_percentage); + info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10; } else if (revision.minor == 3) { /* Vega20 */ smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3, DATA_TABLES(smu_info)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage); if (!smu_info_v3_3) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", smu_info_v3_3->gpuclk_ss_percentage); + info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10; } @@ -2439,10 +2441,11 @@ static enum bp_result get_integrated_info_v11( info_v11 = GET_IMAGE(struct atom_integrated_system_info_v1_11, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage); if (info_v11 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v11->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v11->gpucapinfo); /* @@ -2654,11 +2657,12 @@ static enum bp_result get_integrated_info_v2_1( info_v2_1 = GET_IMAGE(struct atom_integrated_system_info_v2_1, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage); if (info_v2_1 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_1->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v2_1->gpucapinfo); /* @@ -2816,11 +2820,11 @@ static enum bp_result get_integrated_info_v2_2( info_v2_2 = 
GET_IMAGE(struct atom_integrated_system_info_v2_2, DATA_TABLES(integratedsysteminfo)); - DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage); - if (info_v2_2 == NULL) return BP_RESULT_BADBIOSTABLE; + DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", info_v2_2->gpuclk_ss_percentage); + info->gpu_cap_info = le32_to_cpu(info_v2_2->gpucapinfo); /* diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 3ce0ee0d012f..367b163537c4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -568,11 +568,19 @@ void dcn3_clk_mgr_construct( dce_clock_read_ss_info(clk_mgr); clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); + if (!clk_mgr->base.bw_params) { + BREAK_TO_DEBUGGER(); + return; + } /* need physical address of table to give to PMFW */ clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t), &clk_mgr->wm_range_table_addr); + if (!clk_mgr->wm_range_table) { + BREAK_TO_DEBUGGER(); + return; + } } void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index 09eb1bc9aa03..9549f9c15229 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -116,7 +116,7 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count; + int display_count = 0; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; @@ -192,15 +192,19 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base, update_dppclk = true; } - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { - /* No need to apply the w/a if we haven't taken over from bios yet */ - if (clk_mgr_base->clks.dispclk_khz) - dcn315_disable_otg_wa(clk_mgr_base, context, true); + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && + (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { + int requested_dispclk_khz = new_clocks->dispclk_khz; + dcn315_disable_otg_wa(clk_mgr_base, context, true); + + /* Clamp the requested clock to PMFW based on their limit. 
*/ + if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz) + requested_dispclk_khz = dc->debug.min_disp_clk_khz; + + dcn315_smu_set_dispclk(clk_mgr, requested_dispclk_khz); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; - dcn315_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); - if (clk_mgr_base->clks.dispclk_khz) - dcn315_disable_otg_wa(clk_mgr_base, context, false); + dcn315_disable_otg_wa(clk_mgr_base, context, false); update_dispclk = true; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index 29d2003fb712..afce15aa2ff1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -153,7 +153,7 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; - int display_count; + int display_count = 0; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; @@ -226,11 +226,18 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base, update_dppclk = true; } - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) && + (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) { + int requested_dispclk_khz = new_clocks->dispclk_khz; + dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true); + /* Clamp the requested clock to PMFW based on their limit. 
*/ + if (dc->debug.min_disp_clk_khz > 0 && requested_dispclk_khz < dc->debug.min_disp_clk_khz) + requested_dispclk_khz = dc->debug.min_disp_clk_khz; + + dcn316_smu_set_dispclk(clk_mgr, requested_dispclk_khz); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; - dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz); dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false); update_dispclk = true; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 1d84a04acb3f..3ade764d3323 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -816,11 +816,19 @@ void dcn32_clk_mgr_construct( clk_mgr->smu_present = false; clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); + if (!clk_mgr->base.bw_params) { + BREAK_TO_DEBUGGER(); + return; + } /* need physical address of table to give to PMFW */ clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t), &clk_mgr->wm_range_table_addr); + if (!clk_mgr->wm_range_table) { + BREAK_TO_DEBUGGER(); + return; + } } void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 2721842af806..10672bb90a02 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -267,6 +267,7 @@ static bool create_links( link->link_id.type = OBJECT_TYPE_CONNECTOR; link->link_id.id = CONNECTOR_ID_VIRTUAL; link->link_id.enum_id = ENUM_ID_1; + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); if (!link->link_enc) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index bbaeb6c567d0..f0db2d58fd6b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -83,7 +83,7 @@ static void dc_link_destruct(struct dc_link *link) if (link->panel_cntl) link->panel_cntl->funcs->destroy(&link->panel_cntl); - if (link->link_enc) { + if (link->link_enc && !link->is_dig_mapping_flexible) { /* Update link encoder resource tracking variables. These are used for * the dynamic assignment of link encoders to streams. Virtual links * are not assigned encoder resources on creation. 
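The dcn3/dcn32 clk_mgr construction fixes above all apply one pattern: each allocation (the kzalloc'd bw_params, the GART-backed watermark range table) is followed immediately by a NULL check that aborts construction instead of letting a later dereference fault. Below is a minimal userspace sketch of that check-right-after-allocate shape, using hypothetical names (make_ctx, struct ctx) rather than the driver's types; the driver variant only flags the failure and returns, whereas this sketch also unwinds what it already allocated.

#include <stdlib.h>
#include <stdio.h>

struct ctx {
	int *bw_params;       /* stands in for base.bw_params */
	int *wm_range_table;  /* stands in for the GART-backed table */
};

static struct ctx *make_ctx(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	c->bw_params = calloc(1, sizeof(*c->bw_params));
	if (!c->bw_params)          /* check before anything touches it */
		goto fail;

	c->wm_range_table = calloc(1, sizeof(*c->wm_range_table));
	if (!c->wm_range_table)
		goto fail;

	return c;

fail:
	free(c->bw_params);
	free(c);
	return NULL;
}

int main(void)
{
	struct ctx *c = make_ctx();

	printf("construct %s\n", c ? "ok" : "failed");
	if (c) {
		free(c->wm_range_table);
		free(c->bw_params);
		free(c);
	}
	return 0;
}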
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 8b27fe2f5ab1..f036e9988a0d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1100,7 +1100,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* Invalid input */ - if (!plane_state->dst_rect.width || + if (!plane_state || + !plane_state->dst_rect.width || !plane_state->dst_rect.height || !plane_state->src_rect.width || !plane_state->src_rect.height) { @@ -2327,10 +2328,13 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing) break; case COLOR_DEPTH_121212: normalized_pix_clk = (pix_clk * 36) / 24; - break; + break; + case COLOR_DEPTH_141414: + normalized_pix_clk = (pix_clk * 42) / 24; + break; case COLOR_DEPTH_161616: normalized_pix_clk = (pix_clk * 48) / 24; - break; + break; default: ASSERT(0); break; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7e775cec0692..fb0aac0ee88e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -49,7 +49,7 @@ struct dmub_notification; #define DC_VER "3.2.207" -#define MAX_SURFACES 3 +#define MAX_SURFACES 4 #define MAX_PLANES 6 #define MAX_STREAMS 6 #define MAX_SINKS_PER_LINK 4 diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index 3f32e9c3fbaf..b4f9553f7c8b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -65,7 +65,23 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, bool should_use_dmub_lock(struct dc_link *link) { + /* ASIC doesn't support DMUB */ + if (!link->ctx->dmub_srv) + return false; + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) return true; + + /* only use HW lock for PSR1 on single eDP */ + if (link->psr_settings.psr_version == DC_PSR_VERSION_1) { + struct dc_link *edp_links[MAX_NUM_EDP]; + int edp_num; + + get_edp_links(link->dc, edp_links, &edp_num); + + if (edp_num == 1) + return true; + } + return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 416168c7dcc5..b120aa67d26c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1947,20 +1947,11 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx) dc->hwss.get_position(&pipe_ctx, 1, &position); vpos = position.vertical_count; - /* Avoid wraparound calculation issues */ - vupdate_start += stream->timing.v_total; - vupdate_end += stream->timing.v_total; - vpos += stream->timing.v_total; - if (vpos <= vupdate_start) { /* VPOS is in VACTIVE or back porch. */ lines_to_vupdate = vupdate_start - vpos; - } else if (vpos > vupdate_end) { - /* VPOS is in the front porch. */ - return; } else { - /* VPOS is in VUPDATE. */ - lines_to_vupdate = 0; + lines_to_vupdate = stream->timing.v_total - vpos + vupdate_start; } /* Calculate time until VUPDATE in microseconds. */ @@ -1968,13 +1959,18 @@ static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx) stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz; us_to_vupdate = lines_to_vupdate * us_per_line; + /* Stall out until the cursor update completes. 
*/ + if (vupdate_end < vupdate_start) + vupdate_end += stream->timing.v_total; + + /* Position is in the range of vupdate start and end*/ + if (lines_to_vupdate > stream->timing.v_total - vupdate_end + vupdate_start) + us_to_vupdate = 0; + /* 70 us is a conservative estimate of cursor update time*/ if (us_to_vupdate > 70) return; - /* Stall out until the cursor update completes. */ - if (vupdate_end < vupdate_start) - vupdate_end += stream->timing.v_total; us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line; udelay(us_to_vupdate + us_vupdate); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 9bd6a5716cdc..81b1ab55338a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -856,7 +856,8 @@ bool dcn20_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx, /* * if above if is not executed then 'params' equal to 0 and set in bypass */ - mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + if (mpc->funcs->set_output_gamma) + mpc->funcs->set_output_gamma(mpc, mpcc_id, params); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index ce6c70e25703..dd71ea2568e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -1340,7 +1340,7 @@ static struct link_encoder *dcn21_link_encoder_create( kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL); int link_regs_id; - if (!enc21) + if (!enc21 || enc_init_data->hpd_source >= ARRAY_SIZE(link_enc_hpd_regs)) return NULL; link_regs_id = diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index 50dc83404644..4ce45f1bdac0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -392,11 +392,6 @@ bool dpp3_get_optimal_number_of_taps( int min_taps_y, min_taps_c; enum lb_memory_config lb_config; - if (scl_data->viewport.width > scl_data->h_active && - dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - /* * Set default taps if none are provided * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling @@ -434,6 +429,12 @@ bool dpp3_get_optimal_number_of_taps( else scl_data->taps.h_taps_c = in_taps->h_taps_c; + // Avoid null data in the scl data with this early return, proceed non-adaptive calcualtion first + if (scl_data->viewport.width > scl_data->h_active && + dpp->ctx->dc->debug.max_downscale_src_width != 0 && + scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) + return false; + /*Ensure we can support the requested number of vtaps*/ min_taps_y = dc_fixpt_ceil(scl_data->ratios.vert); min_taps_c = dc_fixpt_ceil(scl_data->ratios.vert_c); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 7a643690fdc7..b2cbff798ed8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -448,7 +448,7 @@ void dcn30_init_hw(struct dc *dc) int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; - if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); // 
Initialize the dccg @@ -631,11 +631,12 @@ void dcn30_init_hw(struct dc *dc) if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); - if (dc->clk_mgr->funcs->notify_wm_ranges) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); //if softmax is enabled then hardmax will be set by a different call - if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk && + !dc->clk_mgr->dc_mode_softmax_enabled) dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); if (dc->res_pool->hubbub->funcs->force_pstate_change_control) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 5a8d1a051314..6c58618f1b9c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -2062,6 +2062,9 @@ bool dcn30_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_COUNT(); + if (!pipes) + goto validate_fail; + DC_FP_START(); out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); DC_FP_END(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c index 39a57bcd7866..576acf2ce10d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c @@ -44,7 +44,7 @@ void hubp31_set_unbounded_requesting(struct hubp *hubp, bool enable) struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); REG_UPDATE(DCHUBP_CNTL, HUBP_UNBOUNDED_REQ_MODE, enable); - REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, enable); + REG_UPDATE(CURSOR_CONTROL, CURSOR_REQ_MODE, 1); } void hubp31_soft_reset(struct hubp *hubp, bool reset) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index d3f76512841b..73a7b1ec353e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -1324,6 +1324,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, &hpo_dp_link_enc_regs[inst], @@ -1773,6 +1775,9 @@ bool dcn31_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_COUNT(); + if (!pipes) + goto validate_fail; + DC_FP_START(); out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); DC_FP_END(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index 6b8abdb5c7f8..1a4280b89a1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -1372,6 +1372,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, &hpo_dp_link_enc_regs[inst], @@ -1743,6 +1745,9 @@ bool dcn314_validate_bandwidth(struct dc *dc, 
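The validate_bandwidth changes here and in the dcn30/dcn31 equivalents above share one guard: if the scratch pipe array handed to the DML validation step is NULL, jump straight to the existing validate_fail path instead of indexing into it. A compressed sketch of that control flow, with invented names (validate, struct pipe_desc); it only shows the early bail-out, not the real DML entry points.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pipe_desc { int det_size; };

/* Returns false when the scratch array is missing or validation fails. */
static bool validate(struct pipe_desc *pipes, int pipe_cnt)
{
	bool out = false;

	if (!pipes)
		goto validate_fail;   /* reuse the existing failure label */

	for (int i = 0; i < pipe_cnt; i++)
		if (pipes[i].det_size <= 0)
			goto validate_fail;

	out = true;

validate_fail:
	return out;
}

int main(void)
{
	struct pipe_desc pipes[2] = { { 64 }, { 64 } };

	printf("%d %d\n", validate(pipes, 2), validate(NULL, 2));
	return 0;
}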
BW_VAL_TRACE_COUNT(); + if (!pipes) + goto validate_fail; + if (filter_modes_for_single_channel_workaround(dc, context)) goto validate_fail; diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c index b9b1e5ac4f53..9d643c79afea 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c @@ -1325,6 +1325,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, &hpo_dp_link_enc_regs[inst], @@ -1715,7 +1717,7 @@ static int dcn315_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.dsc_input_bpc = 0; DC_FP_START(); dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt); - if (pixel_rate_crb && !pipe->top_pipe && !pipe->prev_odm_pipe) { + if (pixel_rate_crb) { int bpp = source_format_to_bpp(pipes[pipe_cnt].pipe.src.source_format); /* Ceil to crb segment size */ int approx_det_segs_required_for_pstate = dcn_get_approx_det_segs_required_for_pstate( @@ -1766,28 +1768,26 @@ static int dcn315_populate_dml_pipes_from_context( continue; } - if (!pipe->top_pipe && !pipe->prev_odm_pipe) { - bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) - || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); + bool split_required = pipe->stream->timing.pix_clk_100hz >= dcn_get_max_non_odm_pix_rate_100hz(&dc->dml.soc) + || (pipe->plane_state && pipe->plane_state->src_rect.width > 5120); - if (remaining_det_segs > MIN_RESERVED_DET_SEGS) - pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + - (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 1 : 0); - if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { - /* Clamp to 2 pipe split max det segments */ - remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); - pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; - } - if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { - /* If we are splitting we must have an even number of segments */ - remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; - pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; - } - /* Convert segments into size for DML use */ - pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; - - crb_idx++; + if (remaining_det_segs > MIN_RESERVED_DET_SEGS && crb_pipes != 0) + pipes[pipe_cnt].pipe.src.det_size_override += (remaining_det_segs - MIN_RESERVED_DET_SEGS) / crb_pipes + + (crb_idx < (remaining_det_segs - MIN_RESERVED_DET_SEGS) % crb_pipes ? 
1 : 0); + if (pipes[pipe_cnt].pipe.src.det_size_override > 2 * DCN3_15_MAX_DET_SEGS) { + /* Clamp to 2 pipe split max det segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override - 2 * (DCN3_15_MAX_DET_SEGS); + pipes[pipe_cnt].pipe.src.det_size_override = 2 * DCN3_15_MAX_DET_SEGS; } + if (pipes[pipe_cnt].pipe.src.det_size_override > DCN3_15_MAX_DET_SEGS || split_required) { + /* If we are splitting we must have an even number of segments */ + remaining_det_segs += pipes[pipe_cnt].pipe.src.det_size_override % 2; + pipes[pipe_cnt].pipe.src.det_size_override -= pipes[pipe_cnt].pipe.src.det_size_override % 2; + } + /* Convert segments into size for DML use */ + pipes[pipe_cnt].pipe.src.det_size_override *= DCN3_15_CRB_SEGMENT_SIZE_KB; + + crb_idx++; pipe_cnt++; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c index af3eddc0cf32..932fe9b5c08c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c @@ -1322,6 +1322,8 @@ static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst, &hpo_dp_link_enc_regs[inst], diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index bd75d3cba098..55a24d9f5b14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -667,7 +667,9 @@ bool dcn32_set_output_transfer_func(struct dc *dc, } } - mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + if (mpc->funcs->set_output_gamma) + mpc->funcs->set_output_gamma(mpc, mpcc_id, params); + return ret; } @@ -809,7 +811,7 @@ void dcn32_init_hw(struct dc *dc) int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; - if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); // Initialize the dccg @@ -968,10 +970,11 @@ void dcn32_init_hw(struct dc *dc) if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks) dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub); - if (dc->clk_mgr->funcs->notify_wm_ranges) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->notify_wm_ranges) dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); - if (dc->clk_mgr->funcs->set_hard_max_memclk && !dc->clk_mgr->dc_mode_softmax_enabled) + if (dc->clk_mgr && dc->clk_mgr->funcs && dc->clk_mgr->funcs->set_hard_max_memclk && + !dc->clk_mgr->dc_mode_softmax_enabled) dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr); if (dc->res_pool->hubbub->funcs->force_pstate_change_control) diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 2b8700b291a4..1b1534ffee9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -1310,6 +1310,8 @@ static struct hpo_dp_link_encoder *dcn32_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ #undef REG_STRUCT #define REG_STRUCT 
hpo_dp_link_enc_regs @@ -1796,6 +1798,9 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, // be a valid candidate for SubVP (i.e. has a plane, stream, doesn't // already have phantom pipe assigned, etc.) by previous checks. phantom_stream = dcn32_enable_phantom_stream(dc, context, pipes, pipe_cnt, index); + if (!phantom_stream) + return; + dcn32_enable_phantom_plane(dc, context, phantom_stream, index); for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1852,6 +1857,9 @@ bool dcn32_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_COUNT(); + if (!pipes) + goto validate_fail; + DC_FP_START(); out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); DC_FP_END(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index aed92ced7b76..85430e5baa45 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -1296,6 +1296,8 @@ static struct hpo_dp_link_encoder *dcn321_hpo_dp_link_encoder_create( /* allocate HPO link encoder */ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL); + if (!hpo_dp_enc31) + return NULL; /* out of memory */ #undef REG_STRUCT #define REG_STRUCT hpo_dp_link_enc_regs diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 548cdef8a8ad..543ce9a08cfd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -78,7 +78,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma) { - unsigned int ret_val = 0; + unsigned int ret_val = 1; if (source_format == dm_444_16) { if (!is_chroma) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index 861f32b3248e..69521368ddb6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -283,10 +283,10 @@ static void CalculateDynamicMetadataParameters( double DISPCLK, double DCFClkDeepSleep, double PixelClock, - long HTotal, - long VBlank, - long DynamicMetadataTransmittedBytes, - long DynamicMetadataLinesBeforeActiveRequired, + unsigned int HTotal, + unsigned int VBlank, + unsigned int DynamicMetadataTransmittedBytes, + int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, double *Tsetup, @@ -3280,8 +3280,8 @@ static double CalculateWriteBackDelay( static void CalculateDynamicMetadataParameters(int MaxInterDCNTileRepeaters, double DPPCLK, double DISPCLK, - double DCFClkDeepSleep, double PixelClock, long HTotal, long VBlank, long DynamicMetadataTransmittedBytes, - long DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, + double DCFClkDeepSleep, double PixelClock, unsigned int HTotal, unsigned int VBlank, unsigned int DynamicMetadataTransmittedBytes, + int DynamicMetadataLinesBeforeActiveRequired, int InterlaceEnable, bool ProgressiveToInterlaceUnitInOPP, double *Tsetup, double *Tdmbf, double *Tdmec, double *Tdmsks) { double TotalRepeaterDelayTime = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c 
b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 85e0d1c2a908..9d8917f72d18 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -900,8 +900,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc * for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time, * and the max of (VBLANK blanking time, MALL region)). */ - if (stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 && - subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0) + if (drr_timing && + stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 && + subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0) schedulable = true; return schedulable; @@ -966,7 +967,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context) if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) { // SUBVP + DRR case schedulable = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[vblank_index]); - } else if (found) { + } else if (found && subvp_pipe) { main_timing = &subvp_pipe->stream->timing; phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing; vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index 3df559c591f8..70df992f859d 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -39,7 +39,7 @@ static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma) { - unsigned int ret_val = 0; + unsigned int ret_val = 1; if (source_format == dm_444_16) { if (!is_chroma) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h index 072bd0539605..6b2ab4ec2b5f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h @@ -66,11 +66,15 @@ static inline double dml_max5(double a, double b, double c, double d, double e) static inline double dml_ceil(double a, double granularity) { + if (granularity == 0) + return 0; return (double) dcn_bw_ceil2(a, granularity); } static inline double dml_floor(double a, double granularity) { + if (granularity == 0) + return 0; return (double) dcn_bw_floor2(a, granularity); } @@ -114,11 +118,15 @@ static inline double dml_ceil_2(double f) static inline double dml_ceil_ex(double x, double granularity) { + if (granularity == 0) + return 0; return (double) dcn_bw_ceil2(x, granularity); } static inline double dml_floor_ex(double x, double granularity) { + if (granularity == 0) + return 0; return (double) dcn_bw_floor2(x, granularity); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index a24f3b35ae91..a75c04d510fd 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -3056,6 +3056,7 @@ static int kv_dpm_hw_init(void *handle) if (!amdgpu_dpm) return 0; + mutex_lock(&adev->pm.mutex); kv_dpm_setup_asic(adev); ret = kv_dpm_enable(adev); if (ret) @@ -3063,6 +3064,8 @@ static int kv_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; amdgpu_legacy_dpm_compute_clocks(adev); + 
mutex_unlock(&adev->pm.mutex); + return ret; } @@ -3080,32 +3083,42 @@ static int kv_dpm_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_work_sync(&adev->pm.dpm.thermal.work); + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm_enabled = false; /* disable dpm */ kv_dpm_disable(adev); /* reset the power state */ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); } return 0; } static int kv_dpm_resume(void *handle) { - int ret; + int ret = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->pm.dpm_enabled) { + if (!amdgpu_dpm) + return 0; + + if (!adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); /* asic init will reset to the boot state */ kv_dpm_setup_asic(adev); ret = kv_dpm_enable(adev); - if (ret) + if (ret) { adev->pm.dpm_enabled = false; - else + } else { adev->pm.dpm_enabled = true; - if (adev->pm.dpm_enabled) amdgpu_legacy_dpm_compute_clocks(adev); + } + mutex_unlock(&adev->pm.mutex); } - return 0; + return ret; } static bool kv_dpm_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index 291223ea7ba7..2fd97f5cf8f6 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -1018,9 +1018,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; int temp, size = sizeof(temp); - if (!adev->pm.dpm_enabled) - return; + mutex_lock(&adev->pm.mutex); + if (!adev->pm.dpm_enabled) { + mutex_unlock(&adev->pm.mutex); + return; + } if (!pp_funcs->read_sensor(adev->powerplay.pp_handle, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&temp, @@ -1042,4 +1045,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) adev->pm.dpm.state = dpm_state; amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle); + mutex_unlock(&adev->pm.mutex); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index ff1032de4f76..52e4397d4a2a 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -7796,6 +7796,7 @@ static int si_dpm_hw_init(void *handle) if (!amdgpu_dpm) return 0; + mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); if (ret) @@ -7803,6 +7804,7 @@ static int si_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; amdgpu_legacy_dpm_compute_clocks(adev); + mutex_unlock(&adev->pm.mutex); return ret; } @@ -7820,32 +7822,44 @@ static int si_dpm_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + cancel_work_sync(&adev->pm.dpm.thermal.work); + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + adev->pm.dpm_enabled = false; /* disable dpm */ si_dpm_disable(adev); /* reset the power state */ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); } + return 0; } static int si_dpm_resume(void *handle) { - int ret; + int ret = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->pm.dpm_enabled) { + if (!amdgpu_dpm) + return 0; + + if (!adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ + mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); - if (ret) + if (ret) { adev->pm.dpm_enabled = false; - else + } else { adev->pm.dpm_enabled = true; - if 
(adev->pm.dpm_enabled) amdgpu_legacy_dpm_compute_clocks(adev); + } + mutex_unlock(&adev->pm.mutex); } - return 0; + + return ret; } static bool si_dpm_is_idle(void *handle) diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index eae4b4826f04..ab8ae7464664 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -51,6 +51,11 @@ static int amd_powerplay_create(struct amdgpu_device *adev) hwmgr->adev = adev; hwmgr->not_vf = !amdgpu_sriov_vf(adev); hwmgr->device = amdgpu_cgs_create_device(adev); + if (!hwmgr->device) { + kfree(hwmgr); + return -ENOMEM; + } + mutex_init(&hwmgr->msg_lock); hwmgr->chip_family = adev->family; hwmgr->chip_id = adev->asic_type; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c index cc3b62f73394..1fbd23922082 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c @@ -1420,6 +1420,8 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr GetIndexIntoMasterTable(DATA, SMU_Info), &size, &frev, &crev); + if (!psmu_info) + return -EINVAL; for (i = 0; i < psmu_info->ucSclkEntryNum; i++) { table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c index a6c3610db23e..7209e965dc52 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c @@ -267,10 +267,10 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) if (hwmgr->thermal_controller.fanInfo.bNoFan || (hwmgr->thermal_controller.fanInfo. 
ucTachometerPulsesPerRevolution == 0) || - speed == 0 || + (!speed || speed > UINT_MAX/8) || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) - return 0; + return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) smu7_fan_ctrl_stop_smc_fan_control(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c index 190af79f3236..e93b7c4aa8c9 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c @@ -307,10 +307,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) int result = 0; if (hwmgr->thermal_controller.fanInfo.bNoFan || - speed == 0 || + (!speed || speed > UINT_MAX/8) || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) - return -1; + return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c index f4f4efdbda79..56526f489902 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c @@ -191,7 +191,7 @@ int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) uint32_t tach_period, crystal_clock_freq; int result = 0; - if (!speed) + if (!speed || speed > UINT_MAX/8) return -EINVAL; if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 1a02930d93df..537285f255ab 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -516,7 +516,8 @@ static int smu_sys_set_pp_table(void *handle, return -EIO; } - if (!smu_table->hardcode_pptable) { + if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) { + kfree(smu_table->hardcode_pptable); smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL); if (!smu_table->hardcode_pptable) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 9cd005131f56..ff4447702b12 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1273,6 +1273,9 @@ static int arcturus_set_fan_speed_rpm(struct smu_context *smu, uint32_t crystal_clock_freq = 2500; uint32_t tach_period; + if (!speed || speed > UINT_MAX/8) + return -EINVAL; + tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); WREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT, REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL_ARCT), diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 2e061a74a0b7..6c10140b15cc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1228,7 +1228,7 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t crystal_clock_freq = 2500; uint32_t tach_period; - if (speed == 0) + if (!speed || speed > UINT_MAX/8) return -EINVAL; /* * To prevent from possible overheat, some ASICs may have requirement diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index c79bff02a31a..1ba1bb4f5bd7 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1759,7 +1759,6 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity; gpu_metrics->average_umc_activity = metrics.AverageUclkActivity; - gpu_metrics->average_mm_activity = 0; /* Valid power data is available only from primary die */ if (aldebaran_is_primary(smu)) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 3aab1caed2ac..00af9edb9eba 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1265,7 +1265,7 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t tach_period; int ret; - if (!speed) + if (!speed || speed > UINT_MAX/8) return -EINVAL; ret = smu_v13_0_auto_fan_control(smu, 0); @@ -2498,6 +2498,9 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, uint32_t smu_pcie_arg; int ret, i; + if (!num_of_levels) + return 0; + if (!amdgpu_device_pcie_dynamic_switching_supported()) { if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index bd065f1c699f..2d5cfe4651b4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -1314,6 +1314,8 @@ static ssize_t smu_v13_0_7_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency; gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK]; + gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK]; + gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK]; gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0]; gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0]; gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1]; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c index ebccb74306a7..f30b3d5eeca5 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c @@ -160,6 +160,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms, formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl, kwb_conn->wb_layer->layer_type, &n_formats); + if (!formats) { + kfree(kwb_conn); + return -ENOMEM; + } err = drm_writeback_connector_init(&kms->base, wb_conn, &komeda_wb_connector_funcs, diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 1bc0220e6783..9fe856fd8a84 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -103,7 +103,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, return false; } - switch (mode->crtc_hdisplay) { + switch (mode->hdisplay) { case 640: vbios_mode->enh_table = &res_640x480[refresh_rate_index]; break; @@ -117,7 +117,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, vbios_mode->enh_table = &res_1152x864[refresh_rate_index]; break; case 1280: - if (mode->crtc_vdisplay == 800) + if (mode->vdisplay == 800) vbios_mode->enh_table = &res_1280x800[refresh_rate_index]; else vbios_mode->enh_table = &res_1280x1024[refresh_rate_index]; @@ -129,7 +129,7 @@ static bool 
ast_get_vbios_mode_info(const struct drm_format_info *format, vbios_mode->enh_table = &res_1440x900[refresh_rate_index]; break; case 1600: - if (mode->crtc_vdisplay == 900) + if (mode->vdisplay == 900) vbios_mode->enh_table = &res_1600x900[refresh_rate_index]; else vbios_mode->enh_table = &res_1600x1200[refresh_rate_index]; @@ -138,7 +138,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, vbios_mode->enh_table = &res_1680x1050[refresh_rate_index]; break; case 1920: - if (mode->crtc_vdisplay == 1080) + if (mode->vdisplay == 1080) vbios_mode->enh_table = &res_1920x1080[refresh_rate_index]; else vbios_mode->enh_table = &res_1920x1200[refresh_rate_index]; @@ -182,6 +182,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, hborder = (vbios_mode->enh_table->flags & HBorder) ? 8 : 0; vborder = (vbios_mode->enh_table->flags & VBorder) ? 8 : 0; + adjusted_mode->crtc_hdisplay = vbios_mode->enh_table->hde; adjusted_mode->crtc_htotal = vbios_mode->enh_table->ht; adjusted_mode->crtc_hblank_start = vbios_mode->enh_table->hde + hborder; adjusted_mode->crtc_hblank_end = vbios_mode->enh_table->ht - hborder; @@ -191,6 +192,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format, vbios_mode->enh_table->hfp + vbios_mode->enh_table->hsync); + adjusted_mode->crtc_vdisplay = vbios_mode->enh_table->vde; adjusted_mode->crtc_vtotal = vbios_mode->enh_table->vt; adjusted_mode->crtc_vblank_start = vbios_mode->enh_table->vde + vborder; adjusted_mode->crtc_vblank_end = vbios_mode->enh_table->vt - vborder; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c index 61f4a38e7d2b..8f786592143b 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -153,7 +153,16 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data, ADV7511_AUDIO_CFG3_LEN_MASK, len); regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4); - regmap_write(adv7511->regmap, 0x73, 0x1); + + /* send current Audio infoframe values while updating */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, + BIT(5), BIT(5)); + + regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1); + + /* use Audio infoframe updated info */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, + BIT(5), 0); return 0; } @@ -184,8 +193,9 @@ static int audio_startup(struct device *dev, void *data) regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0), BIT(7) | BIT(6), BIT(7)); /* use Audio infoframe updated info */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1), + regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, BIT(5), 0); + /* enable SPDIF receiver */ if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF) regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 9f9874acfb2b..3e6fe8604959 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -1225,8 +1225,8 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) ret = adv7511_init_regulators(adv7511); if (ret) { - dev_err(dev, "failed to init regulators\n"); - return ret; + dev_err_probe(dev, ret, "failed to init regulators\n"); + goto err_of_node_put; } /* @@ -1347,6 +1347,8 @@ err_i2c_unregister_edid: 
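The adv7511 probe changes convert the regulator-init failure into dev_err_probe() and route it through a new err_of_node_put label (added in the error-label ladder that follows), so the device-tree node reference taken during DT parsing is dropped on every exit path and again in remove. The general shape is the usual acquire-in-order, unwind-in-reverse goto ladder; here is a standalone sketch with made-up resource names (get_a/put_a, get_b), not the bridge's actual calls.

#include <stdbool.h>
#include <stdio.h>

static bool get_a(void) { return true;  }   /* e.g. the DT node reference */
static void put_a(void) { puts("put_a"); }
static bool get_b(void) { return false; }   /* e.g. regulators; fails here */

static int probe(void)
{
	int ret = 0;

	if (!get_a())
		return -1;

	if (!get_b()) {
		ret = -1;
		goto err_put_a;      /* unwind only what was acquired */
	}

	return 0;

err_put_a:
	put_a();
	return ret;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}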
i2c_unregister_device(adv7511->i2c_edid); uninit_regulators: adv7511_uninit_regulators(adv7511); +err_of_node_put: + of_node_put(adv7511->host_node); return ret; } @@ -1355,6 +1357,8 @@ static void adv7511_remove(struct i2c_client *i2c) { struct adv7511 *adv7511 = i2c_get_clientdata(i2c); + of_node_put(adv7511->host_node); + adv7511_uninit_regulators(adv7511); drm_bridge_remove(&adv7511->bridge); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c index b8eeaf4736e7..6a4733c70827 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7533.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c @@ -146,16 +146,14 @@ int adv7533_attach_dsi(struct adv7511 *adv) }; host = of_find_mipi_dsi_host_by_node(adv->host_node); - if (!host) { - dev_err(dev, "failed to find dsi host\n"); - return -EPROBE_DEFER; - } + if (!host) + return dev_err_probe(dev, -EPROBE_DEFER, + "failed to find dsi host\n"); dsi = devm_mipi_dsi_device_register_full(dev, host, &info); - if (IS_ERR(dsi)) { - dev_err(dev, "failed to create dsi device\n"); - return PTR_ERR(dsi); - } + if (IS_ERR(dsi)) + return dev_err_probe(dev, PTR_ERR(dsi), + "failed to create dsi device\n"); adv->dsi = dsi; @@ -165,10 +163,8 @@ int adv7533_attach_dsi(struct adv7511 *adv) MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE; ret = devm_mipi_dsi_attach(dev, dsi); - if (ret < 0) { - dev_err(dev, "failed to attach dsi to host\n"); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, "failed to attach dsi to host\n"); return 0; } @@ -179,7 +175,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv) of_property_read_u32(np, "adi,dsi-lanes", &num_lanes); - if (num_lanes < 1 || num_lanes > 4) + if (num_lanes < 2 || num_lanes > 4) return -EINVAL; adv->num_dsi_lanes = num_lanes; @@ -188,8 +184,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv) if (!adv->host_node) return -ENODEV; - of_node_put(adv->host_node); - adv->use_timing_gen = !of_property_read_bool(np, "adi,disable-timing-generator"); diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 193015c75b45..e14c9fdabe2b 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -2567,6 +2567,8 @@ static int __maybe_unused anx7625_runtime_pm_suspend(struct device *dev) mutex_lock(&ctx->lock); anx7625_stop_dp_work(ctx); + if (!ctx->pdata.panel_bridge) + anx7625_remove_edid(ctx); anx7625_power_standby(ctx); mutex_unlock(&ctx->lock); diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 292c4f6da04a..cc2bf9c14373 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -296,11 +296,11 @@ #define MAX_LANE_COUNT 4 #define MAX_LINK_RATE HBR #define AUTO_TRAIN_RETRY 3 -#define MAX_HDCP_DOWN_STREAM_COUNT 10 +#define MAX_HDCP_DOWN_STREAM_COUNT 127 #define MAX_CR_LEVEL 0x03 #define MAX_EQ_LEVEL 0x03 #define AUX_WAIT_TIMEOUT_MS 15 -#define AUX_FIFO_MAX_SIZE 32 +#define AUX_FIFO_MAX_SIZE 16 #define PIXEL_CLK_DELAY 1 #define PIXEL_CLK_INVERSE 0 #define ADJUST_PHASE_THRESHOLD 80000 @@ -2011,7 +2011,7 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505) { struct device *dev = &it6505->client->dev; u8 av[5][4], bv[5][4]; - int i, err; + int i, err, retry; i = it6505_setup_sha1_input(it6505, it6505->sha1_input); if (i <= 0) { @@ -2020,22 +2020,29 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505) } it6505_sha1_digest(it6505, 
it6505->sha1_input, i, (u8 *)av); + /*1B-05 V' must retry 3 times */ + for (retry = 0; retry < 3; retry++) { + err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv, + sizeof(bv)); - err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv, - sizeof(bv)); + if (err < 0) { + dev_err(dev, "Read V' value Fail %d", retry); + continue; + } - if (err < 0) { - dev_err(dev, "Read V' value Fail"); - return false; + for (i = 0; i < 5; i++) + if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] || + bv[i][1] != av[i][2] || bv[i][0] != av[i][3]) + break; + + if (i == 5) { + DRM_DEV_DEBUG_DRIVER(dev, "V' all match!! %d", retry); + return true; + } } - for (i = 0; i < 5; i++) - if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] || - bv[i][1] != av[i][2] || bv[i][0] != av[i][3]) - return false; - - DRM_DEV_DEBUG_DRIVER(dev, "V' all match!!"); - return true; + DRM_DEV_DEBUG_DRIVER(dev, "V' NOT match!! %d", retry); + return false; } static void it6505_hdcp_wait_ksv_list(struct work_struct *work) @@ -2069,15 +2076,12 @@ static void it6505_hdcp_wait_ksv_list(struct work_struct *work) ksv_list_check = it6505_hdcp_part2_ksvlist_check(it6505); DRM_DEV_DEBUG_DRIVER(dev, "ksv list ready, ksv list check %s", ksv_list_check ? "pass" : "fail"); - if (ksv_list_check) { - it6505_set_bits(it6505, REG_HDCP_TRIGGER, - HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE); + + if (ksv_list_check) return; - } + timeout: - it6505_set_bits(it6505, REG_HDCP_TRIGGER, - HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL, - HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL); + it6505_start_hdcp(it6505); } static void it6505_hdcp_work(struct work_struct *work) @@ -2292,14 +2296,20 @@ static int it6505_process_hpd_irq(struct it6505 *it6505) DRM_DEV_DEBUG_DRIVER(dev, "dp_irq_vector = 0x%02x", dp_irq_vector); if (dp_irq_vector & DP_CP_IRQ) { - it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ, - HDCP_TRIGGER_CPIRQ); - bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS); if (bstatus < 0) return bstatus; DRM_DEV_DEBUG_DRIVER(dev, "Bstatus = 0x%02x", bstatus); + + /*Check BSTATUS when recive CP_IRQ */ + if (bstatus & DP_BSTATUS_R0_PRIME_READY && + it6505->hdcp_status == HDCP_AUTH_GOING) + it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ, + HDCP_TRIGGER_CPIRQ); + else if (bstatus & (DP_BSTATUS_REAUTH_REQ | DP_BSTATUS_LINK_FAILURE) && + it6505->hdcp_status == HDCP_AUTH_DONE) + it6505_start_hdcp(it6505); } ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status); @@ -2419,7 +2429,11 @@ static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505) { struct device *dev = &it6505->client->dev; - DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt"); + DRM_DEV_DEBUG_DRIVER(dev, "HDCP repeater R0 event Interrupt"); + /* 1B01 HDCP encription should start when R0 is ready*/ + it6505_set_bits(it6505, REG_HDCP_TRIGGER, + HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE); + schedule_work(&it6505->hdcp_wait_ksv_list); } @@ -2560,10 +2574,10 @@ static int it6505_poweron(struct it6505 *it6505) /* time interval between OVDD and SYSRSTN at least be 10ms */ if (pdata->gpiod_reset) { usleep_range(10000, 20000); - gpiod_set_value_cansleep(pdata->gpiod_reset, 0); - usleep_range(1000, 2000); gpiod_set_value_cansleep(pdata->gpiod_reset, 1); - usleep_range(10000, 20000); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(pdata->gpiod_reset, 0); + usleep_range(25000, 35000); } it6505->powered = true; @@ -2589,7 +2603,7 @@ static int it6505_poweroff(struct it6505 *it6505) } if (pdata->gpiod_reset) - gpiod_set_value_cansleep(pdata->gpiod_reset, 
0); + gpiod_set_value_cansleep(pdata->gpiod_reset, 1); if (pdata->pwr18) { err = regulator_disable(pdata->pwr18); @@ -3050,7 +3064,7 @@ static int it6505_init_pdata(struct it6505 *it6505) return PTR_ERR(pdata->ovdd); } - pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(pdata->gpiod_reset)) { dev_err(dev, "gpiod_reset gpio not found"); return PTR_ERR(pdata->gpiod_reset); @@ -3349,6 +3363,7 @@ static const struct of_device_id it6505_of_match[] = { { .compatible = "ite,it6505" }, { } }; +MODULE_DEVICE_TABLE(of, it6505_of_match); static struct i2c_driver it6505_i2c_driver = { .driver = { diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 0af2bd8706e4..a6b3af902642 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1579,6 +1579,13 @@ static struct edid *tc_get_edid(struct drm_bridge *bridge, struct drm_connector *connector) { struct tc_data *tc = bridge_to_tc(bridge); + int ret; + + ret = tc_get_display_props(tc); + if (ret < 0) { + dev_err(tc->dev, "failed to read display props: %d\n", ret); + return 0; + } return drm_get_edid(connector, &tc->aux.ddc); } diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index aabdb5c74d93..ed8094b2a7ff 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -126,6 +126,9 @@ #define TC358768_DSI_CONFW_MODE_CLR (6 << 29) #define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24) +/* TC358768_DSICMD_TX (0x0600) register */ +#define TC358768_DSI_CMDTX_DC_START BIT(0) + static const char * const tc358768_supplies[] = { "vddc", "vddmipi", "vddio" }; @@ -230,6 +233,21 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask, tc358768_write(priv, reg, tmp); } +static void tc358768_dsicmd_tx(struct tc358768_priv *priv) +{ + u32 val; + + /* start transfer */ + tc358768_write(priv, TC358768_DSICMD_TX, TC358768_DSI_CMDTX_DC_START); + if (priv->error) + return; + + /* wait transfer completion */ + priv->error = regmap_read_poll_timeout(priv->regmap, TC358768_DSICMD_TX, val, + (val & TC358768_DSI_CMDTX_DC_START) == 0, + 100, 100000); +} + static int tc358768_sw_reset(struct tc358768_priv *priv) { /* Assert Reset */ @@ -517,8 +535,7 @@ static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host, } } - /* start transfer */ - tc358768_write(priv, TC358768_DSICMD_TX, 1); + tc358768_dsicmd_tx(priv); ret = tc358768_clear_error(priv); if (ret) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index ff4d0564122a..3c8e33f416e7 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -480,6 +480,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata, const char *name) { struct device *dev = pdata->dev; + const struct i2c_client *client = to_i2c_client(dev); struct auxiliary_device *aux; int ret; @@ -488,6 +489,7 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata, return -ENOMEM; aux->name = name; + aux->id = (client->adapter->nr << 10) | client->addr; aux->dev.parent = dev; aux->dev.release = ti_sn65dsi86_aux_device_release; device_set_of_node_from_dev(&aux->dev, dev); diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c index ae39dc794190..868bf53db66c 100644 --- a/drivers/gpu/drm/display/drm_dp_cec.c +++ b/drivers/gpu/drm/display/drm_dp_cec.c @@ -310,16 +310,6 @@ 
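The tc358768_dsicmd_tx helper introduced above replaces the fire-and-forget DSI command kick with a start-bit write followed by polling the same register until the hardware clears the start bit or a timeout expires (the driver uses regmap_read_poll_timeout for this). The same wait loop can be sketched in plain C against a fake register, with invented names (read_reg, CMD_START, wait_cmd_done).

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CMD_START 0x1u            /* stands in for the DC_START bit */

static uint32_t fake_reg = CMD_START;

/* Pretend the hardware clears the start bit after a few reads. */
static uint32_t read_reg(void)
{
	static int reads;

	if (++reads >= 3)
		fake_reg &= ~CMD_START;
	return fake_reg;
}

/* Poll until the start bit self-clears or we give up. */
static bool wait_cmd_done(int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		if (!(read_reg() & CMD_START))
			return true;   /* transfer finished */
	}
	return false;              /* timed out */
}

int main(void)
{
	printf("cmd done: %s\n", wait_cmd_done(10) ? "yes" : "timeout");
	return 0;
}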
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid) if (!aux->transfer) return; -#ifndef CONFIG_MEDIA_CEC_RC - /* - * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by - * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined. - * - * Do this here as well to ensure the tests against cec_caps are - * correct. - */ - cec_caps &= ~CEC_CAP_RC; -#endif cancel_delayed_work_sync(&aux->cec.unregister_work); mutex_lock(&aux->cec.lock); @@ -336,7 +326,9 @@ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid) num_las = CEC_MAX_LOG_ADDRS; if (aux->cec.adap) { - if (aux->cec.adap->capabilities == cec_caps && + /* Check if the adapter properties have changed */ + if ((aux->cec.adap->capabilities & CEC_CAP_MONITOR_ALL) == + (cec_caps & CEC_CAP_MONITOR_ALL) && aux->cec.adap->available_log_addrs == num_las) { /* Unchanged, so just set the phys addr */ cec_s_phys_addr_from_edid(aux->cec.adap, edid); diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c index bd61e20770a5..719da3610310 100644 --- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c @@ -160,11 +160,11 @@ EXPORT_SYMBOL(drm_dp_dual_mode_write); static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) { - static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = + static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN + 1] = "DP-HDMI ADAPTOR\x04"; return memcmp(hdmi_id, dp_dual_mode_hdmi_id, - sizeof(dp_dual_mode_hdmi_id)) == 0; + DP_DUAL_MODE_HDMI_ID_LEN) == 0; } static bool is_type1_adaptor(uint8_t adaptor_id) diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 4204d1f93013..a4d84a2c8b4e 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -178,13 +178,13 @@ static int drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len) { int i; - u8 unpacked_rad[16]; + u8 unpacked_rad[16] = {}; - for (i = 0; i < lct; i++) { + for (i = 1; i < lct; i++) { if (i % 2) - unpacked_rad[i] = rad[i / 2] >> 4; + unpacked_rad[i] = rad[(i - 1) / 2] >> 4; else - unpacked_rad[i] = rad[i / 2] & BIT_MASK(4); + unpacked_rad[i] = rad[(i - 1) / 2] & 0xF; } /* TODO: Eventually add something to printk so we can format the rad @@ -319,6 +319,9 @@ static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr hdr->broadcast = (buf[idx] >> 7) & 0x1; hdr->path_msg = (buf[idx] >> 6) & 0x1; hdr->msg_len = buf[idx] & 0x3f; + if (hdr->msg_len < 1) /* min space for body CRC */ + return false; + idx++; hdr->somt = (buf[idx] >> 7) & 0x1; hdr->eomt = (buf[idx] >> 6) & 0x1; @@ -3652,8 +3655,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms ret = 0; mgr->payload_id_table_cleared = false; - memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv)); - memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv)); + mgr->reset_rx_state = true; } out_unlock: @@ -3781,6 +3783,11 @@ out_fail: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); +static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg) +{ + memset(msg, 0, sizeof(*msg)); +} + static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, struct drm_dp_mst_branch **mstb) @@ -3859,6 +3866,34 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, return true; } +static int get_msg_request_type(u8 data) +{ + 
return data & 0x7f; +} + +static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr, + const struct drm_dp_sideband_msg_tx *txmsg, + const struct drm_dp_sideband_msg_rx *rxmsg) +{ + const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr; + const struct drm_dp_mst_branch *mstb = txmsg->dst; + int tx_req_type = get_msg_request_type(txmsg->msg[0]); + int rx_req_type = get_msg_request_type(rxmsg->msg[0]); + char rad_str[64]; + + if (tx_req_type == rx_req_type) + return true; + + drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str)); + drm_dbg_kms(mgr->dev, + "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n", + mstb, hdr->seqno, mstb->lct, rad_str, + drm_dp_mst_req_type_str(rx_req_type), rx_req_type, + drm_dp_mst_req_type_str(tx_req_type), tx_req_type); + + return false; +} + static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_sideband_msg_tx *txmsg; @@ -3888,6 +3923,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) goto out_clear_reply; } + if (!verify_rx_request_type(mgr, txmsg, msg)) + goto out_clear_reply; + drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply); if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { @@ -3921,6 +3959,22 @@ out: return 0; } +static bool primary_mstb_probing_is_done(struct drm_dp_mst_topology_mgr *mgr) +{ + bool probing_done = false; + + mutex_lock(&mgr->lock); + + if (mgr->mst_primary && drm_dp_mst_topology_try_get_mstb(mgr->mst_primary)) { + probing_done = mgr->mst_primary->link_address_sent; + drm_dp_mst_topology_put_mstb(mgr->mst_primary); + } + + mutex_unlock(&mgr->lock); + + return probing_done; +} + static inline bool drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_pending_up_req *up_req) @@ -3951,8 +4005,12 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { - dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); - hotplug = true; + if (!primary_mstb_probing_is_done(mgr)) { + drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. 
Skip it.\n"); + } else { + dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); + hotplug = true; + } } drm_dp_mst_topology_put_mstb(mstb); @@ -3995,9 +4053,10 @@ static void drm_dp_mst_up_req_work(struct work_struct *work) static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_pending_up_req *up_req; + struct drm_dp_mst_branch *mst_primary; if (!drm_dp_get_one_sb_msg(mgr, true, NULL)) - goto out; + goto out_clear_reply; if (!mgr->up_req_recv.have_eomt) return 0; @@ -4015,16 +4074,26 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n", up_req->msg.req_type); kfree(up_req); - goto out; + goto out_clear_reply; } - drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, + mutex_lock(&mgr->lock); + mst_primary = mgr->mst_primary; + if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) { + mutex_unlock(&mgr->lock); + kfree(up_req); + goto out_clear_reply; + } + mutex_unlock(&mgr->lock); + + drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type, false); + drm_dp_mst_topology_put_mstb(mst_primary); + if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { const struct drm_dp_connection_status_notify *conn_stat = &up_req->msg.u.conn_stat; - bool handle_csn; drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", conn_stat->port_number, @@ -4033,16 +4102,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) conn_stat->message_capability_status, conn_stat->input_port, conn_stat->peer_device_type); - - mutex_lock(&mgr->probe_lock); - handle_csn = mgr->mst_primary->link_address_sent; - mutex_unlock(&mgr->probe_lock); - - if (!handle_csn) { - drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it."); - kfree(up_req); - goto out; - } } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { const struct drm_dp_resource_status_notify *res_stat = &up_req->msg.u.resource_stat; @@ -4057,12 +4116,22 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) list_add_tail(&up_req->next, &mgr->up_req_list); mutex_unlock(&mgr->up_req_lock); queue_work(system_long_wq, &mgr->up_req_work); - -out: +out_clear_reply: memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; } +static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->lock); + if (mgr->reset_rx_state) { + mgr->reset_rx_state = false; + reset_msg_rx_state(&mgr->down_rep_recv); + reset_msg_rx_state(&mgr->up_req_recv); + } + mutex_unlock(&mgr->lock); +} + /** * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event * @mgr: manager to notify irq for. 
@@ -4097,6 +4166,8 @@ int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u *handled = true; } + update_msg_rx_state(mgr); + if (esi[1] & DP_DOWN_REP_MSG_RDY) { ret = drm_dp_mst_handle_down_rep(mgr); *handled = true; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index b097bff1cd18..e737e45a3a70 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -573,6 +573,30 @@ mode_valid(struct drm_atomic_state *state) return 0; } +static int drm_atomic_check_valid_clones(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + struct drm_encoder *drm_enc; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + + drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask) { + if (!drm_enc->possible_clones) { + DRM_DEBUG("enc%d possible_clones is 0\n", drm_enc->base.id); + continue; + } + + if ((crtc_state->encoder_mask & drm_enc->possible_clones) != + crtc_state->encoder_mask) { + DRM_DEBUG("crtc%d failed valid clone check for mask 0x%x\n", + crtc->base.id, crtc_state->encoder_mask); + return -EINVAL; + } + } + + return 0; +} + /** * drm_atomic_helper_check_modeset - validate state object for modeset changes * @dev: DRM device @@ -744,6 +768,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev, ret = drm_atomic_add_affected_planes(state, crtc); if (ret != 0) return ret; + + ret = drm_atomic_check_valid_clones(state, crtc); + if (ret != 0) + return ret; } /* @@ -1389,7 +1417,7 @@ crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state) mode = &new_crtc_state->mode; adjusted_mode = &new_crtc_state->adjusted_mode; - if (!new_crtc_state->mode_changed) + if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed) continue; drm_dbg_atomic(dev, "modeset on [ENCODER:%d:%s]\n", diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index b620a0da2d02..55b78b027e09 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -932,6 +932,10 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state, if (mode != DRM_MODE_DPMS_ON) mode = DRM_MODE_DPMS_OFF; + + if (connector->dpms == mode) + goto out; + connector->dpms = mode; crtc = connector->state->crtc; diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 3d18d840ef3b..e5c1572f720b 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -1100,6 +1100,10 @@ static const struct drm_prop_enum_list dp_colorspaces[] = { * callback. For atomic drivers the remapping to the "ACTIVE" property is * implemented in the DRM core. * + * On atomic drivers any DPMS setproperty ioctl where the value does not + * change is completely skipped, otherwise a full atomic commit will occur. + * On legacy drivers the exact behavior is driver specific. + * * Note that this property cannot be set through the MODE_ATOMIC ioctl, * userspace must use "ACTIVE" on the CRTC instead. 
* diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 5ed77e3361fd..fc6028aae9bd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -6164,6 +6164,7 @@ static void drm_reset_display_info(struct drm_connector *connector) info->has_hdmi_infoframe = false; info->rgb_quant_range_selectable = false; memset(&info->hdmi, 0, sizeof(info->hdmi)); + memset(&connector->hdr_sink_metadata, 0, sizeof(connector->hdr_sink_metadata)); info->edid_hdmi_rgb444_dc_modes = 0; info->edid_hdmi_ycbcr444_dc_modes = 0; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 442746d9777a..a0abfb3f05f0 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1495,14 +1495,14 @@ int drm_fb_helper_set_par(struct fb_info *info) } EXPORT_SYMBOL(drm_fb_helper_set_par); -static void pan_set(struct drm_fb_helper *fb_helper, int x, int y) +static void pan_set(struct drm_fb_helper *fb_helper, int dx, int dy) { struct drm_mode_set *mode_set; mutex_lock(&fb_helper->client.modeset_mutex); drm_client_for_each_modeset(mode_set, &fb_helper->client) { - mode_set->x = x; - mode_set->y = y; + mode_set->x += dx; + mode_set->y += dy; } mutex_unlock(&fb_helper->client.modeset_mutex); } @@ -1511,16 +1511,18 @@ static int pan_display_atomic(struct fb_var_screeninfo *var, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; - int ret; + int ret, dx, dy; - pan_set(fb_helper, var->xoffset, var->yoffset); + dx = var->xoffset - info->var.xoffset; + dy = var->yoffset - info->var.yoffset; + pan_set(fb_helper, dx, dy); ret = drm_client_modeset_commit_locked(&fb_helper->client); if (!ret) { info->var.xoffset = var->xoffset; info->var.yoffset = var->yoffset; } else - pan_set(fb_helper, info->var.xoffset, info->var.yoffset); + pan_set(fb_helper, -dx, -dy); return ret; } diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 8257f9d4f619..22a373eaffef 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -151,7 +151,7 @@ static void show_leaks(struct drm_mm *mm) { } INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, u64, __subtree_last, - START, LAST, static inline, drm_mm_interval_tree) + START, LAST, static inline __maybe_unused, drm_mm_interval_tree) struct drm_mm_node * __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 304004fb80aa..8e6ec75dc687 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -808,14 +808,11 @@ EXPORT_SYMBOL(drm_mode_set_name); */ int drm_mode_vrefresh(const struct drm_display_mode *mode) { - unsigned int num, den; + unsigned int num = 1, den = 1; if (mode->htotal == 0 || mode->vtotal == 0) return 0; - num = mode->clock; - den = mode->htotal * mode->vtotal; - if (mode->flags & DRM_MODE_FLAG_INTERLACE) num *= 2; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) @@ -823,6 +820,12 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode) if (mode->vscan > 1) den *= mode->vscan; + if (check_mul_overflow(mode->clock, num, &num)) + return 0; + + if (check_mul_overflow(mode->htotal * mode->vtotal, den, &den)) + return 0; + return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den); } EXPORT_SYMBOL(drm_mode_vrefresh); diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index 7fd3de89ed07..acd29b4f43f8 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -49,7 +49,7 @@ static LIST_HEAD(panel_list); * @dev: parent device of the 
panel * @funcs: panel operations * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to - * the panel interface + * the panel interface (must NOT be DRM_MODE_CONNECTOR_Unknown) * * Initialize the panel structure for subsequent registration with * drm_panel_add(). @@ -57,6 +57,9 @@ static LIST_HEAD(panel_list); void drm_panel_init(struct drm_panel *panel, struct device *dev, const struct drm_panel_funcs *funcs, int connector_type) { + if (connector_type == DRM_MODE_CONNECTOR_Unknown) + DRM_WARN("%s: %s: a valid connector type is required!\n", __func__, dev_name(dev)); + INIT_LIST_HEAD(&panel->list); panel->dev = dev; panel->funcs = funcs; diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 5b2506c65e95..036b095c9888 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -93,6 +93,12 @@ static const struct drm_dmi_panel_orientation_data onegx1_pro = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd640x960_leftside_up = { + .width = 640, + .height = 960, + .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP, +}; + static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = { .width = 720, .height = 1280, @@ -123,6 +129,12 @@ static const struct drm_dmi_panel_orientation_data lcd1080x1920_rightside_up = { .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP, }; +static const struct drm_dmi_panel_orientation_data lcd1200x1920_leftside_up = { + .width = 1200, + .height = 1920, + .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP, +}; + static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = { .width = 1200, .height = 1920, @@ -184,6 +196,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO AYANEO 2/2S */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* AYA NEO 2021 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"), @@ -196,6 +214,30 @@ static const struct dmi_system_id orientation_data[] = { DMI_MATCH(DMI_PRODUCT_NAME, "AIR"), }, .driver_data = (void *)&lcd1080x1920_leftside_up, + }, { /* AYA NEO Flip DS Bottom Screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FLIP DS"), + }, + .driver_data = (void *)&lcd640x960_leftside_up, + }, { /* AYA NEO Flip KB/DS Top Screen */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_MATCH(DMI_PRODUCT_NAME, "FLIP"), + }, + .driver_data = (void *)&lcd1080x1920_leftside_up, + }, { /* AYA NEO Founder */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYA NEO"), + DMI_MATCH(DMI_PRODUCT_NAME, "AYA NEO Founder"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO GEEK */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + DMI_MATCH(DMI_PRODUCT_NAME, "GEEK"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* AYA NEO NEXT */ .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"), @@ -208,6 +250,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_MATCH(DMI_BOARD_NAME, "KUN"), }, .driver_data = (void *)&lcd1600x2560_rightside_up, + }, { /* AYA NEO SLIDE */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"), + 
DMI_MATCH(DMI_PRODUCT_NAME, "SLIDE"), + }, + .driver_data = (void *)&lcd1080x1920_leftside_up, }, { /* AYN Loki Max */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ayn"), @@ -297,6 +345,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"), }, .driver_data = (void *)&gpd_win2, + }, { /* GPD Win 2 (correct DMI strings) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "WIN2") + }, + .driver_data = (void *)&lcd720x1280_rightside_up, }, { /* GPD Win 3 */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), @@ -403,7 +457,6 @@ static const struct dmi_system_id orientation_data[] = { }, { /* Lenovo Yoga Tab 3 X90F */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), - DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"), }, .driver_data = (void *)&lcd1600x2560_rightside_up, @@ -426,6 +479,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"), }, .driver_data = (void *)&lcd1600x2560_leftside_up, + }, { /* OneXPlayer Mini (Intel) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"), + }, + .driver_data = (void *)&lcd1200x1920_leftside_up, }, { /* OrangePi Neo */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "OrangePi"), diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index cf741c5c82d2..982174af74b1 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -481,7 +481,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, } else { CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_DEPTH | - VIVS_GL_FLUSH_CACHE_COLOR); + VIVS_GL_FLUSH_CACHE_COLOR | + VIVS_GL_FLUSH_CACHE_SHADER_L1); if (has_blt) { CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1); CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index f29952a55c05..b932a02efccd 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -519,6 +519,16 @@ static int etnaviv_bind(struct device *dev) priv->num_gpus = 0; priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + /* + * If the GPU is part of a system with DMA addressing limitations, + * request pages for our SHM backend buffers from the DMA32 zone to + * hopefully avoid performance killing SWIOTLB bounce buffering. 
+ */ + if (dma_addressing_limited(dev)) { + priv->shm_gfp_mask |= GFP_DMA32; + priv->shm_gfp_mask &= ~__GFP_HIGHMEM; + } + priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev); if (IS_ERR(priv->cmdbuf_suballoc)) { dev_err(drm->dev, "Failed to create cmdbuf suballocator\n"); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c index 0edcf8ceb4a7..898f84a0fc30 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c @@ -83,10 +83,15 @@ static void etnaviv_core_dump_registers(struct core_dump_iterator *iter, { struct etnaviv_dump_registers *reg = iter->data; unsigned int i; + u32 read_addr; for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) { + read_addr = etnaviv_dump_registers[i]; + if (read_addr >= VIVS_PM_POWER_CONTROLS && + read_addr <= VIVS_PM_PULSE_EATER) + read_addr = gpu_fix_power_address(gpu, read_addr); reg->reg = cpu_to_le32(etnaviv_dump_registers[i]); - reg->value = cpu_to_le32(gpu_read(gpu, etnaviv_dump_registers[i])); + reg->value = cpu_to_le32(gpu_read(gpu, read_addr)); } etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 23d5058eca8d..740680205e8d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -342,6 +342,7 @@ void *etnaviv_gem_vmap(struct drm_gem_object *obj) static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj) { struct page **pages; + pgprot_t prot; lockdep_assert_held(&obj->lock); @@ -349,8 +350,19 @@ static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj) if (IS_ERR(pages)) return NULL; - return vmap(pages, obj->base.size >> PAGE_SHIFT, - VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + switch (obj->flags & ETNA_BO_CACHE_MASK) { + case ETNA_BO_CACHED: + prot = PAGE_KERNEL; + break; + case ETNA_BO_UNCACHED: + prot = pgprot_noncached(PAGE_KERNEL); + break; + case ETNA_BO_WC: + default: + prot = pgprot_writecombine(PAGE_KERNEL); + } + + return vmap(pages, obj->base.size >> PAGE_SHIFT, VM_MAP, prot); } static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op) diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index f667e7906d1f..c0410e314dfa 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -590,7 +590,7 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) u32 pmc, ppc; /* enable clock gating */ - ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); + ppc = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */ @@ -598,9 +598,9 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) gpu->identity.revision == 0x4302) ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING; - gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc); + gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, ppc); - pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS); + pmc = gpu_read_power(gpu, VIVS_PM_MODULE_CONTROLS); /* Disable PA clock gating for GC400+ without bugfix except for GC420 */ if (gpu->identity.model >= chipModel_GC400 && @@ -635,7 +635,7 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ; pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ; - gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc); + gpu_write_power(gpu, 
VIVS_PM_MODULE_CONTROLS, pmc); } void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch) @@ -695,11 +695,11 @@ static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu) (gpu->identity.features & chipFeatures_PIPE_3D)) { /* Performance fix: disable internal DFS */ - pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER); + pulse_eater = gpu_read_power(gpu, VIVS_PM_PULSE_EATER); pulse_eater |= BIT(18); } - gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater); + gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater); } static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) @@ -798,14 +798,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) if (ret) goto fail; - /* - * If the GPU is part of a system with DMA addressing limitations, - * request pages for our SHM backend buffers from the DMA32 zone to - * hopefully avoid performance killing SWIOTLB bounce buffering. - */ - if (dma_addressing_limited(gpu->dev)) - priv->shm_gfp_mask |= GFP_DMA32; - /* Create buffer: */ ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer, PAGE_SIZE); @@ -1300,10 +1292,12 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu, { u32 val; + mutex_lock(&gpu->lock); + /* disable clock gating */ - val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); + val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; - gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val); + gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); /* enable debug register */ val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); @@ -1311,6 +1305,8 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu, gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val); sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE); + + mutex_unlock(&gpu->lock); } static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu, @@ -1320,23 +1316,27 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu, unsigned int i; u32 val; + mutex_lock(&gpu->lock); + sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST); - for (i = 0; i < submit->nr_pmrs; i++) { - const struct etnaviv_perfmon_request *pmr = submit->pmrs + i; - - *pmr->bo_vma = pmr->sequence; - } - /* disable debug register */ val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL); val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS; gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val); /* enable clock gating */ - val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); + val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS); val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; - gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val); + gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val); + + mutex_unlock(&gpu->lock); + + for (i = 0; i < submit->nr_pmrs; i++) { + const struct etnaviv_perfmon_request *pmr = submit->pmrs + i; + + *pmr->bo_vma = pmr->sequence; + } } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 85eddd492774..39f1e83d3cc7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -10,6 +10,7 @@ #include "etnaviv_gem.h" #include "etnaviv_mmu.h" #include "etnaviv_drv.h" +#include "common.xml.h" struct etnaviv_gem_submit; struct etnaviv_vram_mapping; @@ -159,6 +160,26 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg) return readl(gpu->mmio + reg); } +static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg) +{ + /* Power registers in GC300 < 2.0 are offset by 0x100 */ + if (gpu->identity.model == chipModel_GC300 && + 
gpu->identity.revision < 0x2000) + reg += 0x100; + + return reg; +} + +static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data) +{ + writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg)); +} + +static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg) +{ + return readl(gpu->mmio + gpu_fix_power_address(gpu, reg)); +} + int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value); int etnaviv_gpu_init(struct etnaviv_gpu *gpu); diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig index 5ca71ef87325..c9ee98693b48 100644 --- a/drivers/gpu/drm/fsl-dcu/Kconfig +++ b/drivers/gpu/drm/fsl-dcu/Kconfig @@ -8,6 +8,7 @@ config DRM_FSL_DCU select DRM_PANEL select REGMAP_MMIO select VIDEOMODE_HELPERS + select MFD_SYSCON if SOC_LS1021A help Choose this option if you have an Freescale DCU chipset. If M is selected the module will be called fsl-dcu-drm. diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index b4acc3422ba4..063a5e5bb737 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -100,6 +100,7 @@ static void fsl_dcu_irq_uninstall(struct drm_device *dev) static int fsl_dcu_load(struct drm_device *dev, unsigned long flags) { struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + struct regmap *scfg; int ret; ret = fsl_dcu_drm_modeset_init(fsl_dev); @@ -108,6 +109,20 @@ static int fsl_dcu_load(struct drm_device *dev, unsigned long flags) return ret; } + scfg = syscon_regmap_lookup_by_compatible("fsl,ls1021a-scfg"); + if (PTR_ERR(scfg) != -ENODEV) { + /* + * For simplicity, enable the PIXCLK unconditionally, + * resulting in increased power consumption. Disabling + * the clock in PM or on unload could be implemented as + * a future improvement. 
+ */ + ret = regmap_update_bits(scfg, SCFG_PIXCLKCR, SCFG_PIXCLKCR_PXCEN, + SCFG_PIXCLKCR_PXCEN); + if (ret < 0) + return dev_err_probe(dev->dev, ret, "failed to enable pixclk\n"); + } + ret = drm_vblank_init(dev, dev->mode_config.num_crtc); if (ret < 0) { dev_err(dev->dev, "failed to initialize vblank\n"); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h index e2049a0e8a92..566396013c04 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h @@ -160,6 +160,9 @@ #define FSL_DCU_ARGB4444 12 #define FSL_DCU_YUV422 14 +#define SCFG_PIXCLKCR 0x28 +#define SCFG_PIXCLKCR_PXCEN BIT(31) + #define VF610_LAYER_REG_NUM 9 #define LS1021A_LAYER_REG_NUM 10 diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c index 7e76790c6a81..cba97d7db131 100644 --- a/drivers/gpu/drm/gma500/mid_bios.c +++ b/drivers/gpu/drm/gma500/mid_bios.c @@ -279,6 +279,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv) 0, PCI_DEVFN(2, 0)); int ret = -1; + if (pci_gfx_root == NULL) { + WARN_ON(1); + return; + } + /* Get the address of the platform config vbt */ pci_read_config_dword(pci_gfx_root, 0xFC, &addr); pci_dev_put(pci_gfx_root); diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index 68050409dd26..499c7c8916df 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -157,6 +157,7 @@ static int hyperv_vmbus_probe(struct hv_device *hdev, return 0; err_free_mmio: + iounmap(hv->vram); vmbus_free_mmio(hv->mem->start, hv->fb_size); err_vmbus_close: vmbus_close(hdev->channel); @@ -175,6 +176,7 @@ static int hyperv_vmbus_remove(struct hv_device *hdev) vmbus_close(hdev->channel); hv_set_drvdata(hdev, NULL); + iounmap(hv->vram); vmbus_free_mmio(hv->mem->start, hv->fb_size); return 0; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index fb8d1d63407a..4b5bf23f2336 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6691,12 +6691,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) { struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_plane_state *plane_state; struct intel_crtc_state *crtc_state; + struct intel_plane *plane; struct intel_crtc *crtc; u8 affected_pipes = 0; u8 modeset_pipes = 0; int i; + /* + * Any plane which is in use by the joiner needs its crtc. + * Pull those in first as this will not have happened yet + * if the plane remains disabled according to uapi. 
+ */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + crtc = to_intel_crtc(plane_state->hw.crtc); + if (!crtc) + continue; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + /* Now pull in all joined crtcs */ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { affected_pipes |= crtc_state->bigjoiner_pipes; if (intel_crtc_needs_modeset(crtc_state)) diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index c69a638796c6..32e43ea5a422 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -1571,7 +1571,7 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer * * arithmetic related to alignment and offset calculation. */ if (is_gen12_ccs_cc_plane(&fb->base, i)) { - if (IS_ALIGNED(fb->base.offsets[i], PAGE_SIZE)) + if (IS_ALIGNED(fb->base.offsets[i], 64)) continue; else return -EINVAL; diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index bc523a3d1d42..00daeacc2f13 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -100,8 +100,6 @@ static const u32 icl_sdr_y_plane_formats[] = { DRM_FORMAT_Y216, DRM_FORMAT_XYUV8888, DRM_FORMAT_XVYU2101010, - DRM_FORMAT_XVYU12_16161616, - DRM_FORMAT_XVYU16161616, }; static const u32 icl_sdr_uv_plane_formats[] = { @@ -128,8 +126,6 @@ static const u32 icl_sdr_uv_plane_formats[] = { DRM_FORMAT_Y216, DRM_FORMAT_XYUV8888, DRM_FORMAT_XVYU2101010, - DRM_FORMAT_XVYU12_16161616, - DRM_FORMAT_XVYU16161616, }; static const u32 icl_hdr_plane_formats[] = { diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 107f465a27b9..458320d28e27 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -339,6 +339,11 @@ struct intel_engine_guc_stats { * @start_gt_clk: GT clock time of last idle to active transition. 
*/ u64 start_gt_clk; + + /** + * @total: The last value of total returned + */ + u64 total; }; struct intel_engine_cs { diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index f8d0523f4c18..d1dcb018117d 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -134,7 +134,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) GEN9_MEDIA_PG_ENABLE | GEN11_MEDIA_SAMPLER_PG_ENABLE; - if (GRAPHICS_VER(gt->i915) >= 12) { + if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) { for (i = 0; i < I915_MAX_VCS; i++) if (HAS_ENGINE(gt, _VCS(i))) pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) | diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 56df4c4a8a1a..4047b01060d8 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1213,6 +1213,21 @@ static void __get_engine_usage_record(struct intel_engine_cs *engine, } while (++i < 6); } +static void __set_engine_usage_record(struct intel_engine_cs *engine, + u32 last_in, u32 id, u32 total) +{ + struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine); + +#define record_write(map_, field_, val_) \ + iosys_map_wr_field(map_, 0, struct guc_engine_usage_record, field_, val_) + + record_write(&rec_map, last_switch_in_stamp, last_in); + record_write(&rec_map, current_context_index, id); + record_write(&rec_map, total_runtime, total); + +#undef record_write +} + static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) { struct intel_engine_guc_stats *stats = &engine->stats.guc; @@ -1331,9 +1346,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) total += intel_gt_clock_interval_to_ns(gt, clk); } + if (total > stats->total) + stats->total = total; + spin_unlock_irqrestore(&guc->timestamp.lock, flags); - return ns_to_ktime(total); + return ns_to_ktime(stats->total); } static void __reset_guc_busyness_stats(struct intel_guc *guc) @@ -1350,8 +1368,21 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc) guc_update_pm_timestamp(guc, &unused); for_each_engine(engine, gt, id) { + struct intel_engine_guc_stats *stats = &engine->stats.guc; + guc_update_engine_gt_clks(engine); - engine->stats.guc.prev_total = 0; + + /* + * If resetting a running context, accumulate the active + * time as well since there will be no context switch. 
+ */ + if (stats->running) { + u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk; + + stats->total_gt_clks += clk; + } + stats->prev_total = 0; + stats->running = 0; } spin_unlock_irqrestore(&guc->timestamp.lock, flags); @@ -1404,6 +1435,9 @@ static void guc_timestamp_ping(struct work_struct *wrk) static int guc_action_enable_usage_stats(struct intel_guc *guc) { + struct intel_gt *gt = guc_to_gt(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; u32 offset = intel_guc_engine_usage_offset(guc); u32 action[] = { INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF, @@ -1411,6 +1445,9 @@ static int guc_action_enable_usage_stats(struct intel_guc *guc) 0, }; + for_each_engine(engine, gt, id) + __set_engine_usage_record(engine, 0, 0xffffffff, 0); + return intel_guc_send(guc, action, ARRAY_SIZE(action)); } @@ -4727,12 +4764,20 @@ static inline void guc_log_context(struct drm_printer *p, { drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id); drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca); - drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n", - ce->ring->head, - ce->lrc_reg_state[CTX_RING_HEAD]); - drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n", - ce->ring->tail, - ce->lrc_reg_state[CTX_RING_TAIL]); + if (intel_context_pin_if_active(ce)) { + drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n", + ce->ring->head, + ce->lrc_reg_state[CTX_RING_HEAD]); + drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n", + ce->ring->tail, + ce->lrc_reg_state[CTX_RING_TAIL]); + intel_context_unpin(ce); + } else { + drm_printf(p, "\t\tLRC Head: Internal %u, Memory not pinned\n", + ce->ring->head); + drm_printf(p, "\t\tLRC Tail: Internal %u, Memory not pinned\n", + ce->ring->tail); + } drm_printf(p, "\t\tContext Pin Count: %u\n", atomic_read(&ce->pin_count)); drm_printf(p, "\t\tGuC ID Ref Count: %u\n", diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index d2bed466540a..3da476dec1da 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c @@ -222,7 +222,6 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) u8 *buf; struct opregion_header *header; struct vbt v; - const char opregion_signature[16] = OPREGION_SIGNATURE; gvt_dbg_core("init vgpu%d opregion\n", vgpu->id); vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL | @@ -236,8 +235,10 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu) /* emulated opregion with VBT mailbox only */ buf = (u8 *)vgpu_opregion(vgpu)->va; header = (struct opregion_header *)buf; - memcpy(header->signature, opregion_signature, - sizeof(opregion_signature)); + + static_assert(sizeof(header->signature) == sizeof(OPREGION_SIGNATURE) - 1); + memcpy(header->signature, OPREGION_SIGNATURE, sizeof(header->signature)); + header->size = 0x8; header->opregion_ver = 0x02000000; header->mboxes = MBOX_VBT; diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 762127dd56c5..70a854557e6e 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -506,6 +506,6 @@ int __init i915_scheduler_module_init(void) return 0; err_priorities: - kmem_cache_destroy(slab_priorities); + kmem_cache_destroy(slab_dependencies); return -ENOMEM; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index e050a2de5fd1..e25f76b46b0a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -164,7 +164,7 @@ static 
int igt_ppgtt_alloc(void *arg) return PTR_ERR(ppgtt); if (!ppgtt->vm.allocate_va_range) - goto err_ppgtt_cleanup; + goto ppgtt_vm_put; /* * While we only allocate the page tables here and so we could @@ -232,7 +232,7 @@ err_ppgtt_cleanup: goto retry; } i915_gem_ww_ctx_fini(&ww); - +ppgtt_vm_put: i915_vm_put(&ppgtt->vm); return err; } diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c index 31267c00782f..af91e45b5d13 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-crtc.c +++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c @@ -206,15 +206,13 @@ int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm) if (crtc->irq < 0) return crtc->irq; - ret = request_irq(crtc->irq, dcss_crtc_irq_handler, - 0, "dcss_drm", crtc); + ret = request_irq(crtc->irq, dcss_crtc_irq_handler, IRQF_NO_AUTOEN, + "dcss_drm", crtc); if (ret) { dev_err(dcss->dev, "irq request failed with %d.\n", ret); return ret; } - disable_irq(crtc->irq); - return 0; } diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 89585b31b985..5f423a2e0ede 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -410,14 +410,12 @@ static int ipu_drm_bind(struct device *dev, struct device *master, void *data) } ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]); - ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0, - "imx_drm", ipu_crtc); + ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, + IRQF_NO_AUTOEN, "imx_drm", ipu_crtc); if (ret < 0) { dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); return ret; } - /* Only enable IRQ when we actually need it to trigger work. */ - disable_irq(ipu_crtc->irq); return 0; } diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 1c4482ad507d..78bdfd855d11 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -468,6 +468,7 @@ static const struct of_device_id mcde_of_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, mcde_of_match); static struct platform_driver mcde_driver = { .driver = { diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig index d1eededee943..d663869bfce1 100644 --- a/drivers/gpu/drm/mediatek/Kconfig +++ b/drivers/gpu/drm/mediatek/Kconfig @@ -11,9 +11,6 @@ config DRM_MEDIATEK select DRM_KMS_HELPER select DRM_MIPI_DSI select DRM_PANEL - select MEMORY - select MTK_SMI - select PHY_MTK_MIPI_DSI select VIDEOMODE_HELPERS help Choose this option if you have a Mediatek SoCs. 
@@ -24,7 +21,6 @@ config DRM_MEDIATEK config DRM_MEDIATEK_DP tristate "DRM DPTX Support for MediaTek SoCs" depends on DRM_MEDIATEK - select PHY_MTK_DP select DRM_DISPLAY_HELPER select DRM_DISPLAY_DP_HELPER select DRM_DP_AUX_BUS @@ -35,6 +31,5 @@ config DRM_MEDIATEK_HDMI tristate "DRM HDMI Support for Mediatek SoCs" depends on DRM_MEDIATEK select SND_SOC_HDMI_CODEC if SND_SOC - select PHY_MTK_HDMI help DRM/KMS HDMI driver for Mediatek SoCs diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index c24eeb7ffde7..6bf50a15c9b4 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -457,18 +457,16 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp, enum dp_pixelformat color_format) { u32 val; - - /* update MISC0 */ - mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034, - color_format << DP_TEST_COLOR_FORMAT_SHIFT, - DP_TEST_COLOR_FORMAT_MASK); + u32 misc0_color; switch (color_format) { case DP_PIXELFORMAT_YUV422: val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_YCBCR422; + misc0_color = DP_COLOR_FORMAT_YCbCr422; break; case DP_PIXELFORMAT_RGB: val = PIXEL_ENCODE_FORMAT_DP_ENC0_P0_RGB; + misc0_color = DP_COLOR_FORMAT_RGB; break; default: drm_warn(mtk_dp->drm_dev, "Unsupported color format: %d\n", @@ -476,6 +474,11 @@ static int mtk_dp_set_color_format(struct mtk_dp *mtk_dp, return -EINVAL; } + /* update MISC0 */ + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_3034, + misc0_color, + DP_TEST_COLOR_FORMAT_MASK); + mtk_dp_update_bits(mtk_dp, MTK_DP_ENC0_P0_303C, val, PIXEL_ENCODE_FORMAT_DP_ENC0_P0_MASK); return 0; @@ -1949,7 +1952,6 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge) struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); enum drm_connector_status ret = connector_status_disconnected; bool enabled = mtk_dp->enabled; - u8 sink_count = 0; if (!mtk_dp->train_info.cable_plugged_in) return ret; @@ -1971,8 +1973,8 @@ static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge) * function, we just need to check the HPD connection to check * whether we connect to a sink device. */ - drm_dp_dpcd_readb(&mtk_dp->aux, DP_SINK_COUNT, &sink_count); - if (DP_GET_SINK_COUNT(sink_count)) + + if (drm_dp_read_sink_count(&mtk_dp->aux) > 0) ret = connector_status_connected; if (!enabled) { @@ -2274,12 +2276,19 @@ mtk_dp_bridge_mode_valid(struct drm_bridge *bridge, { struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); u32 bpp = info->color_formats & DRM_COLOR_FORMAT_YCBCR422 ? 16 : 24; - u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) * - drm_dp_max_lane_count(mtk_dp->rx_cap), - drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) * - mtk_dp->max_lanes); + u32 lane_count_min = mtk_dp->train_info.lane_count; + u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) * + lane_count_min; - if (rate < mode->clock * bpp / 8) + /* + *FEC overhead is approximately 2.4% from DP 1.4a spec 2.2.1.4.2. + *The down-spread amplitude shall either be disabled (0.0%) or up + *to 0.5% from 1.4a 3.5.2.6. Add up to approximately 3% total overhead. 
+ * + *Because rate is already divided by 10, + *mode->clock does not need to be multiplied by 10 + */ + if ((rate * 97 / 100) < (mode->clock * bpp / 8)) return MODE_CLOCK_HIGH; return MODE_OK; @@ -2320,10 +2329,9 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct drm_display_info *display_info = &conn_state->connector->display_info; - u32 rate = min_t(u32, drm_dp_max_link_rate(mtk_dp->rx_cap) * - drm_dp_max_lane_count(mtk_dp->rx_cap), - drm_dp_bw_code_to_link_rate(mtk_dp->max_linkrate) * - mtk_dp->max_lanes); + u32 lane_count_min = mtk_dp->train_info.lane_count; + u32 rate = drm_dp_bw_code_to_link_rate(mtk_dp->train_info.link_rate) * + lane_count_min; *num_input_fmts = 0; @@ -2332,8 +2340,8 @@ static u32 *mtk_dp_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, * datarate of YUV422 and sink device supports YUV422, we output YUV422 * format. Use this condition, we can support more resolution. */ - if ((rate < (mode->clock * 24 / 8)) && - (rate > (mode->clock * 16 / 8)) && + if (((rate * 97 / 100) < (mode->clock * 24 / 8)) && + ((rate * 97 / 100) > (mode->clock * 16 / 8)) && (display_info->color_formats & DRM_COLOR_FORMAT_YCBCR422)) { input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL); if (!input_fmts) diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 1f5d39a4077c..5ad9c384046c 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -125,14 +125,14 @@ struct mtk_dpi_yc_limit { * @is_ck_de_pol: Support CK/DE polarity. * @swap_input_support: Support input swap function. * @support_direct_pin: IP supports direct connection to dpi panels. - * @input_2pixel: Input pixel of dp_intf is 2 pixel per round, so enable this - * config to enable this feature. * @dimension_mask: Mask used for HWIDTH, HPORCH, VSYNC_WIDTH and VSYNC_PORCH * (no shift). * @hvsize_mask: Mask of HSIZE and VSIZE mask (no shift). * @channel_swap_shift: Shift value of channel swap. * @yuv422_en_bit: Enable bit of yuv422. * @csc_enable_bit: Enable bit of CSC. + * @input_2p_en_bit: Enable bit for input two pixel per round feature. + * If present, implies that the feature must be enabled. * @pixels_per_iter: Quantity of transferred pixels per iteration. 
*/ struct mtk_dpi_conf { @@ -145,12 +145,12 @@ struct mtk_dpi_conf { bool is_ck_de_pol; bool swap_input_support; bool support_direct_pin; - bool input_2pixel; u32 dimension_mask; u32 hvsize_mask; u32 channel_swap_shift; u32 yuv422_en_bit; u32 csc_enable_bit; + u32 input_2p_en_bit; u32 pixels_per_iter; }; @@ -406,12 +406,13 @@ static void mtk_dpi_config_swap_input(struct mtk_dpi *dpi, bool enable) static void mtk_dpi_config_2n_h_fre(struct mtk_dpi *dpi) { - mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N); + if (dpi->conf->reg_h_fre_con) + mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, H_FRE_2N, H_FRE_2N); } static void mtk_dpi_config_disable_edge(struct mtk_dpi *dpi) { - if (dpi->conf->edge_sel_en) + if (dpi->conf->edge_sel_en && dpi->conf->reg_h_fre_con) mtk_dpi_mask(dpi, dpi->conf->reg_h_fre_con, 0, EDGE_SEL_EN); } @@ -463,6 +464,7 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi) mtk_dpi_disable(dpi); clk_disable_unprepare(dpi->pixel_clk); + clk_disable_unprepare(dpi->tvd_clk); clk_disable_unprepare(dpi->engine_clk); } @@ -479,6 +481,12 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) goto err_refcount; } + ret = clk_prepare_enable(dpi->tvd_clk); + if (ret) { + dev_err(dpi->dev, "Failed to enable tvd pll: %d\n", ret); + goto err_engine; + } + ret = clk_prepare_enable(dpi->pixel_clk); if (ret) { dev_err(dpi->dev, "Failed to enable pixel clock: %d\n", ret); @@ -488,6 +496,8 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi) return 0; err_pixel: + clk_disable_unprepare(dpi->tvd_clk); +err_engine: clk_disable_unprepare(dpi->engine_clk); err_refcount: dpi->refcount--; @@ -602,9 +612,9 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, mtk_dpi_dual_edge(dpi); mtk_dpi_config_disable_edge(dpi); } - if (dpi->conf->input_2pixel) { - mtk_dpi_mask(dpi, DPI_CON, DPINTF_INPUT_2P_EN, - DPINTF_INPUT_2P_EN); + if (dpi->conf->input_2p_en_bit) { + mtk_dpi_mask(dpi, DPI_CON, dpi->conf->input_2p_en_bit, + dpi->conf->input_2p_en_bit); } mtk_dpi_sw_reset(dpi, false); @@ -952,12 +962,12 @@ static const struct mtk_dpi_conf mt8195_dpintf_conf = { .output_fmts = mt8195_output_fmts, .num_output_fmts = ARRAY_SIZE(mt8195_output_fmts), .pixels_per_iter = 4, - .input_2pixel = true, .dimension_mask = DPINTF_HPW_MASK, .hvsize_mask = DPINTF_HSIZE_MASK, .channel_swap_shift = DPINTF_CH_SWAP, .yuv422_en_bit = DPINTF_YUV422_EN, .csc_enable_bit = DPINTF_CSC_ENABLE, + .input_2p_en_bit = DPINTF_INPUT_2P_EN, }; static int mtk_dpi_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 21e584038581..336a6ee792c6 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -119,7 +119,14 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev, int ret; args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8); - args->size = args->pitch * args->height; + + /* + * Multiply 2 variables of different types, + * for example: args->size = args->spacing * args->height; + * may cause coverity issue with unintentional overflow. 
+ */ + args->size = args->pitch; + args->size *= args->height; mtk_gem = mtk_drm_gem_create(dev, args->size, false); if (IS_ERR(mtk_gem)) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 30d361671aa9..fb062e52eb12 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -120,6 +120,7 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state, struct mtk_drm_gem_obj *mtk_gem; unsigned int pitch, format; dma_addr_t addr; + int offset; gem = fb->obj[0]; mtk_gem = to_mtk_gem_obj(gem); @@ -127,8 +128,16 @@ static void mtk_plane_update_new_state(struct drm_plane_state *new_state, pitch = fb->pitches[0]; format = fb->format->format; - addr += (new_state->src.x1 >> 16) * fb->format->cpp[0]; - addr += (new_state->src.y1 >> 16) * pitch; + /* + * Using dma_addr_t variable to calculate with multiplier of different types, + * for example: addr += (new_state->src.x1 >> 16) * fb->format->cpp[0]; + * may cause coverity issue with unintentional overflow. + */ + offset = (new_state->src.x1 >> 16) * fb->format->cpp[0]; + addr += offset; + offset = (new_state->src.y1 >> 16) * pitch; + addr += offset; + mtk_plane_state->pending.enable = true; mtk_plane_state->pending.pitch = pitch; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index d871b1dba083..0acfda47f002 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -1015,12 +1015,12 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct mtk_dsi *dsi = host_to_dsi(host); - u32 recv_cnt, i; + ssize_t recv_cnt; u8 read_data[16]; void *src_addr; u8 irq_flag = CMD_DONE_INT_FLAG; u32 dsi_mode; - int ret; + int ret, i; dsi_mode = readl(dsi->regs + DSI_MODE_CTRL); if (dsi_mode & MODE) { @@ -1069,7 +1069,7 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host, if (recv_cnt) memcpy(msg->rx_buf, src_addr, recv_cnt); - DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n", + DRM_INFO("dsi get %zd byte data from the panel address(0x%x)\n", recv_cnt, *((u8 *)(msg->tx_buf))); restore_dsi_mode: diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 6e8f99554f54..058b4c042e49 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -137,7 +137,7 @@ enum hdmi_aud_channel_swap_type { struct hdmi_audio_param { enum hdmi_audio_coding_type aud_codec; - enum hdmi_audio_sample_size aud_sampe_size; + enum hdmi_audio_sample_size aud_sample_size; enum hdmi_aud_input_type aud_input_type; enum hdmi_aud_i2s_fmt aud_i2s_fmt; enum hdmi_aud_mclk aud_mclk; @@ -173,6 +173,7 @@ struct mtk_hdmi { unsigned int sys_offset; void __iomem *regs; enum hdmi_colorspace csp; + struct platform_device *audio_pdev; struct hdmi_audio_param aud_param; bool audio_enable; bool powered; @@ -1074,7 +1075,7 @@ static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi) hdmi->csp = HDMI_COLORSPACE_RGB; aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + aud_param->aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; aud_param->aud_input_type = HDMI_AUD_INPUT_I2S; aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; aud_param->aud_mclk = HDMI_AUD_MCLK_128FS; @@ -1575,14 +1576,14 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, void *data, switch (daifmt->fmt) { case HDMI_I2S: hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - 
hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S; hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT; hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS; break; case HDMI_SPDIF: hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM; - hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16; + hdmi_params.aud_sample_size = HDMI_AUDIO_SAMPLE_SIZE_16; hdmi_params.aud_input_type = HDMI_AUD_INPUT_SPDIF; break; default: @@ -1666,6 +1667,11 @@ static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = { .no_capture_mute = 1, }; +static void mtk_hdmi_unregister_audio_driver(void *data) +{ + platform_device_unregister(data); +} + static int mtk_hdmi_register_audio_driver(struct device *dev) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); @@ -1675,13 +1681,20 @@ static int mtk_hdmi_register_audio_driver(struct device *dev) .i2s = 1, .data = hdmi, }; - struct platform_device *pdev; + int ret; - pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, - PLATFORM_DEVID_AUTO, &codec_data, - sizeof(codec_data)); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + hdmi->audio_pdev = platform_device_register_data(dev, + HDMI_CODEC_DRV_NAME, + PLATFORM_DEVID_AUTO, + &codec_data, + sizeof(codec_data)); + if (IS_ERR(hdmi->audio_pdev)) + return PTR_ERR(hdmi->audio_pdev); + + ret = devm_add_action_or_reset(dev, mtk_hdmi_unregister_audio_driver, + hdmi->audio_pdev); + if (ret) + return ret; DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME); return 0; diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c index 2a942dc6a6dc..2a82119eb58e 100644 --- a/drivers/gpu/drm/meson/meson_vclk.c +++ b/drivers/gpu/drm/meson/meson_vclk.c @@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq, FREQ_1000_1001(params[i].pixel_freq)); DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n", i, params[i].phy_freq, - FREQ_1000_1001(params[i].phy_freq/1000)*1000); + FREQ_1000_1001(params[i].phy_freq/10)*10); /* Match strict frequency */ if (phy_freq == params[i].phy_freq && vclk_freq == params[i].vclk_freq) return MODE_OK; /* Match 1000/1001 variant */ - if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) && + if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) && vclk_freq == FREQ_1000_1001(params[i].vclk_freq)) return MODE_OK; } @@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target, for (freq = 0 ; params[freq].pixel_freq ; ++freq) { if ((phy_freq == params[freq].phy_freq || - phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) && + phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) && (vclk_freq == params[freq].vclk_freq || vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) { if (vclk_freq != params[freq].vclk_freq) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 870252bef23f..9156e673d360 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1070,50 +1070,51 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; u32 val; + int ret; /* - * The GMU may still be in slumber unless the GPU started so check and - * skip putting it back into slumber if so + * GMU firmware's internal power state gets messed up if we send "prepare_slumber" hfi when + * oob_gpu handshake wasn't done 
after the last wake up. So do a dummy handshake here when + * required */ - val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE); + if (adreno_gpu->base.needs_hw_init) { + if (a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET)) + goto force_off; - if (val != 0xf) { - int ret = a6xx_gmu_wait_for_idle(gmu); - - /* If the GMU isn't responding assume it is hung */ - if (ret) { - a6xx_gmu_force_off(gmu); - return; - } - - a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); - - /* tell the GMU we want to slumber */ - ret = a6xx_gmu_notify_slumber(gmu); - if (ret) { - a6xx_gmu_force_off(gmu); - return; - } - - ret = gmu_poll_timeout(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, - !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB), - 100, 10000); - - /* - * Let the user know we failed to slumber but don't worry too - * much because we are powering down anyway - */ - - if (ret) - DRM_DEV_ERROR(gmu->dev, - "Unable to slumber GMU: status = 0%x/0%x\n", - gmu_read(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS), - gmu_read(gmu, - REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2)); + a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET); } + ret = a6xx_gmu_wait_for_idle(gmu); + + /* If the GMU isn't responding assume it is hung */ + if (ret) + goto force_off; + + a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung); + + /* tell the GMU we want to slumber */ + ret = a6xx_gmu_notify_slumber(gmu); + if (ret) + goto force_off; + + ret = gmu_poll_timeout(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val, + !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB), + 100, 10000); + + /* + * Let the user know we failed to slumber but don't worry too + * much because we are powering down anyway + */ + + if (ret) + DRM_DEV_ERROR(gmu->dev, + "Unable to slumber GMU: status = 0%x/0%x\n", + gmu_read(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS), + gmu_read(gmu, + REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2)); + /* Turn off HFI */ a6xx_hfi_stop(gmu); @@ -1122,6 +1123,11 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu) /* Tell RPMh to power off the GPU */ a6xx_rpmh_stop(gmu); + + return; + +force_off: + a6xx_gmu_force_off(gmu); } @@ -1462,15 +1468,13 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, irq = platform_get_irq_byname(pdev, name); - ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu); + ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu); if (ret) { DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n", name, ret); return ret; } - disable_irq(irq); - return irq; } diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index d6a810b7cfa2..cdb4665b953c 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -2056,7 +2056,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) * to cause power supply issues: */ if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu)) - gpu->clamp_to_idle = true; + priv->gpu_clamp_to_idle = true; /* Check if there is a GMU phandle and set it up */ node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c index 1d9d83d7b99e..72f043732f93 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c @@ -80,7 +80,7 @@ static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms, mode = &state->adjusted_mode; - crtc_clk = mode->vtotal * 
mode->hdisplay * drm_mode_vrefresh(mode); + crtc_clk = (u64)mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode); drm_atomic_crtc_for_each_plane(plane, crtc) { pstate = to_dpu_plane_state(plane->state); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c index ba8f2ba04629..5f8345016ffe 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c @@ -2059,6 +2059,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc) } } + if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither) + phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL); + /* reset the merge 3D HW block */ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) { phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c index c8f14555834a..70a6dfe94faa 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c @@ -46,6 +46,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, u32 slice_last_group_size; u32 det_thresh_flatness; bool is_cmd_mode = !(mode & DSC_MODE_VIDEO); + bool input_10_bits = dsc->bits_per_component == 10; DPU_REG_WRITE(c, DSC_COMMON_MODE, mode); @@ -62,7 +63,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc, data |= (dsc->line_buf_depth << 3); data |= (dsc->simple_422 << 2); data |= (dsc->convert_rgb << 1); - data |= dsc->bits_per_component; + data |= input_10_bits; DPU_REG_WRITE(c, DSC_ENC, data); diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c index 1245c7aa49df..a2113d6a022b 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.c +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -410,10 +410,10 @@ static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio) safe_to_exit_level = 5; break; default: + safe_to_exit_level = 14; drm_dbg_dp(audio->drm_dev, "setting the default safe_to_exit_level = %u\n", safe_to_exit_level); - safe_to_exit_level = 14; break; } diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index 3a1417397283..f9e6cbd9e2d2 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -100,17 +100,35 @@ static int dsi_mgr_setup_components(int id) int ret; if (!IS_BONDED_DSI()) { + /* + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. + */ + msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + ret = msm_dsi_host_register(msm_dsi->host); if (ret) return ret; - - msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE); - msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); } else if (other_dsi) { struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ? msm_dsi : other_dsi; struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ? other_dsi : msm_dsi; + + /* + * PLL0 is to drive both DSI link clocks in bonded DSI mode. + * + * Set the usecase before calling msm_dsi_host_register(), which would + * already program the PLL source mux based on a default usecase. 
+ */ + msm_dsi_phy_set_usecase(clk_master_dsi->phy, + MSM_DSI_PHY_MASTER); + msm_dsi_phy_set_usecase(clk_slave_dsi->phy, + MSM_DSI_PHY_SLAVE); + msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); + msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); + /* Register slave host first, so that slave DSI device * has a chance to probe, and do not block the master * DSI device's probe. @@ -124,14 +142,6 @@ static int dsi_mgr_setup_components(int id) ret = msm_dsi_host_register(master_link_dsi->host); if (ret) return ret; - - /* PLL0 is to drive both 2 DSI link clocks in bonded DSI mode. */ - msm_dsi_phy_set_usecase(clk_master_dsi->phy, - MSM_DSI_PHY_MASTER); - msm_dsi_phy_set_usecase(clk_slave_dsi->phy, - MSM_DSI_PHY_SLAVE); - msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy); - msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy); } return 0; diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 95f4374ae21c..d6ecff0ab618 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -305,6 +305,7 @@ void msm_debugfs_init(struct drm_minor *minor) { struct drm_device *dev = minor->dev; struct msm_drm_private *priv = dev->dev_private; + struct dentry *gpu_devfreq; drm_debugfs_create_files(msm_debugfs_list, ARRAY_SIZE(msm_debugfs_list), @@ -325,6 +326,17 @@ void msm_debugfs_init(struct drm_minor *minor) debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root, dev, &shrink_fops); + gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root); + + debugfs_create_bool("idle_clamp",0600, gpu_devfreq, + &priv->gpu_clamp_to_idle); + + debugfs_create_u32("upthreshold",0600, gpu_devfreq, + &priv->gpu_devfreq_config.upthreshold); + + debugfs_create_u32("downdifferential",0600, gpu_devfreq, + &priv->gpu_devfreq_config.downdifferential); + if (priv->kms && priv->kms->funcs->debugfs_init) priv->kms->funcs->debugfs_init(priv->kms, minor); diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index d4e0ef608950..81deb112ea44 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -233,6 +234,14 @@ struct msm_drm_private { */ unsigned int hangcheck_period; + /** gpu_devfreq_config: Devfreq tuning config for the GPU. */ + struct devfreq_simple_ondemand_data gpu_devfreq_config; + + /** + * gpu_clamp_to_idle: Enable clamping to idle freq when inactive + */ + bool gpu_clamp_to_idle; + /** * disable_err_irq: * diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index b39cd332751d..5929ecaa1fcd 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -109,11 +109,15 @@ struct msm_gpu_devfreq { struct mutex lock; /** - * idle_constraint: + * idle_freq: * - * A PM QoS constraint to limit max freq while the GPU is idle. + * Shadow frequency used while the GPU is idle. From the PoV of + * the devfreq governor, we are continuing to sample busyness and + * adjust frequency while the GPU is idle, but we use this shadow + * value as the GPU is actually clamped to minimum frequency while + * it is inactive. 
*/ - struct dev_pm_qos_request idle_freq; + unsigned long idle_freq; /** * boost_constraint: @@ -135,8 +139,6 @@ struct msm_gpu_devfreq { /** idle_time: Time of last transition to idle: */ ktime_t idle_time; - struct devfreq_dev_status average_status; - /** * idle_work: * @@ -275,9 +277,6 @@ struct msm_gpu { struct msm_gpu_state *crashstate; - /* Enable clamping to idle freq when inactive: */ - bool clamp_to_idle; - /* True if the hardware supports expanded apriv (a650 and newer) */ bool hw_apriv; diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index 85c443a37e4e..4e73b915fe1a 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -33,6 +33,16 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp)); + /* + * If the GPU is idle, devfreq is not aware, so just stash + * the new target freq (to use when we return to active) + */ + if (df->idle_freq) { + df->idle_freq = *freq; + dev_pm_opp_put(opp); + return 0; + } + if (gpu->funcs->gpu_set_freq) { mutex_lock(&df->lock); gpu->funcs->gpu_set_freq(gpu, opp, df->suspended); @@ -48,15 +58,26 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, static unsigned long get_freq(struct msm_gpu *gpu) { + struct msm_gpu_devfreq *df = &gpu->devfreq; + + /* + * If the GPU is idle, use the shadow/saved freq to avoid + * confusing devfreq (which is unaware that we are switching + * to lowest freq until the device is active again) + */ + if (df->idle_freq) + return df->idle_freq; + if (gpu->funcs->gpu_get_freq) return gpu->funcs->gpu_get_freq(gpu); return clk_get_rate(gpu->core_clk); } -static void get_raw_dev_status(struct msm_gpu *gpu, +static int msm_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *status) { + struct msm_gpu *gpu = dev_to_gpu(dev); struct msm_gpu_devfreq *df = &gpu->devfreq; u64 busy_cycles, busy_time; unsigned long sample_rate; @@ -72,7 +93,7 @@ static void get_raw_dev_status(struct msm_gpu *gpu, if (df->suspended) { mutex_unlock(&df->lock); status->busy_time = 0; - return; + return 0; } busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate); @@ -87,71 +108,6 @@ static void get_raw_dev_status(struct msm_gpu *gpu, busy_time = ~0LU; status->busy_time = busy_time; -} - -static void update_average_dev_status(struct msm_gpu *gpu, - const struct devfreq_dev_status *raw) -{ - struct msm_gpu_devfreq *df = &gpu->devfreq; - const u32 polling_ms = df->devfreq->profile->polling_ms; - const u32 max_history_ms = polling_ms * 11 / 10; - struct devfreq_dev_status *avg = &df->average_status; - u64 avg_freq; - - /* simple_ondemand governor interacts poorly with gpu->clamp_to_idle. - * When we enforce the constraint on idle, it calls get_dev_status - * which would normally reset the stats. When we remove the - * constraint on active, it calls get_dev_status again where busy_time - * would be 0. - * - * To remedy this, we always return the average load over the past - * polling_ms. - */ - - /* raw is longer than polling_ms or avg has no history */ - if (div_u64(raw->total_time, USEC_PER_MSEC) >= polling_ms || - !avg->total_time) { - *avg = *raw; - return; - } - - /* Truncate the oldest history first. - * - * Because we keep the history with a single devfreq_dev_status, - * rather than a list of devfreq_dev_status, we have to assume freq - * and load are the same over avg->total_time. 
We can scale down - * avg->busy_time and avg->total_time by the same factor to drop - * history. - */ - if (div_u64(avg->total_time + raw->total_time, USEC_PER_MSEC) >= - max_history_ms) { - const u32 new_total_time = polling_ms * USEC_PER_MSEC - - raw->total_time; - avg->busy_time = div_u64( - mul_u32_u32(avg->busy_time, new_total_time), - avg->total_time); - avg->total_time = new_total_time; - } - - /* compute the average freq over avg->total_time + raw->total_time */ - avg_freq = mul_u32_u32(avg->current_frequency, avg->total_time); - avg_freq += mul_u32_u32(raw->current_frequency, raw->total_time); - do_div(avg_freq, avg->total_time + raw->total_time); - - avg->current_frequency = avg_freq; - avg->busy_time += raw->busy_time; - avg->total_time += raw->total_time; -} - -static int msm_devfreq_get_dev_status(struct device *dev, - struct devfreq_dev_status *status) -{ - struct msm_gpu *gpu = dev_to_gpu(dev); - struct devfreq_dev_status raw; - - get_raw_dev_status(gpu, &raw); - update_average_dev_status(gpu, &raw); - *status = gpu->devfreq.average_status; return 0; } @@ -183,6 +139,8 @@ static bool has_devfreq(struct msm_gpu *gpu) void msm_devfreq_init(struct msm_gpu *gpu) { struct msm_gpu_devfreq *df = &gpu->devfreq; + struct msm_drm_private *priv = gpu->dev->dev_private; + int ret; /* We need target support to do devfreq */ if (!gpu->funcs->gpu_busy) @@ -190,11 +148,12 @@ void msm_devfreq_init(struct msm_gpu *gpu) mutex_init(&df->lock); - dev_pm_qos_add_request(&gpu->pdev->dev, &df->idle_freq, - DEV_PM_QOS_MAX_FREQUENCY, - PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE); - dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, - DEV_PM_QOS_MIN_FREQUENCY, 0); + ret = dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq, + DEV_PM_QOS_MIN_FREQUENCY, 0); + if (ret < 0) { + DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize QoS\n"); + return; + } msm_devfreq_profile.initial_freq = gpu->fast_rate; @@ -209,11 +168,10 @@ void msm_devfreq_init(struct msm_gpu *gpu) df->devfreq = devm_devfreq_add_device(&gpu->pdev->dev, &msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND, - NULL); + &priv->gpu_devfreq_config); if (IS_ERR(df->devfreq)) { DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n"); - dev_pm_qos_remove_request(&df->idle_freq); dev_pm_qos_remove_request(&df->boost_freq); df->devfreq = NULL; return; @@ -255,7 +213,6 @@ void msm_devfreq_cleanup(struct msm_gpu *gpu) devfreq_cooling_unregister(gpu->cooling); dev_pm_qos_remove_request(&df->boost_freq); - dev_pm_qos_remove_request(&df->idle_freq); } void msm_devfreq_resume(struct msm_gpu *gpu) @@ -328,6 +285,7 @@ void msm_devfreq_active(struct msm_gpu *gpu) { struct msm_gpu_devfreq *df = &gpu->devfreq; unsigned int idle_time; + unsigned long target_freq; if (!has_devfreq(gpu)) return; @@ -337,8 +295,28 @@ void msm_devfreq_active(struct msm_gpu *gpu) */ cancel_idle_work(df); + /* + * Hold devfreq lock to synchronize with get_dev_status()/ + * target() callbacks + */ + mutex_lock(&df->devfreq->lock); + + target_freq = df->idle_freq; + idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time)); + df->idle_freq = 0; + + /* + * We could have become active again before the idle work had a + * chance to run, in which case the df->idle_freq would have + * still been zero. In this case, no need to change freq. 
+ */ + if (target_freq) + msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0); + + mutex_unlock(&df->devfreq->lock); + /* * If we've been idle for a significant fraction of a polling * interval, then we won't meet the threshold of busyness for @@ -347,9 +325,6 @@ void msm_devfreq_active(struct msm_gpu *gpu) if (idle_time > msm_devfreq_profile.polling_ms) { msm_devfreq_boost(gpu, 2); } - - dev_pm_qos_update_request(&df->idle_freq, - PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE); } @@ -358,11 +333,24 @@ static void msm_devfreq_idle_work(struct kthread_work *work) struct msm_gpu_devfreq *df = container_of(work, struct msm_gpu_devfreq, idle_work.work); struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq); + struct msm_drm_private *priv = gpu->dev->dev_private; + unsigned long idle_freq, target_freq = 0; + + /* + * Hold devfreq lock to synchronize with get_dev_status()/ + * target() callbacks + */ + mutex_lock(&df->devfreq->lock); + + idle_freq = get_freq(gpu); + + if (priv->gpu_clamp_to_idle) + msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0); df->idle_time = ktime_get(); + df->idle_freq = idle_freq; - if (gpu->clamp_to_idle) - dev_pm_qos_update_request(&df->idle_freq, 0); + mutex_unlock(&df->devfreq->lock); } void msm_devfreq_idle(struct msm_gpu *gpu) diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index f2dca41e46c5..2953d6a6d390 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -143,6 +143,9 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) nouveau_bo_del_io_reserve_lru(bo); nv10_bo_put_tile_region(dev, nvbo->tile, NULL); + if (bo->base.import_attach) + drm_prime_gem_destroy(&bo->base, bo->sg); + /* * If nouveau_bo_new() allocated this buffer, the GEM object was never * initialized, so don't attempt to release it. 
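Editor's note: the msm_gpu_devfreq.c hunks above replace the PM QoS max-frequency constraint with a "shadow" idle frequency: while the GPU is idle the devfreq target callback only records the requested rate, and the recorded value is applied when the GPU becomes active again. The following is a minimal user-space sketch of that idea only, not the driver code; every name in it (fake_devfreq, apply_hw_freq, go_idle, go_active) is invented for illustration.

/* Shadow-idle-frequency pattern: while idle, frequency requests are only
 * stashed; the last stashed value is programmed on the next wakeup.
 */
#include <stdio.h>

struct fake_devfreq {
	unsigned long idle_freq;   /* non-zero => idle; holds the shadow freq */
	unsigned long hw_freq;     /* frequency actually programmed */
};

static void apply_hw_freq(struct fake_devfreq *df, unsigned long freq)
{
	df->hw_freq = freq;
	printf("hw freq set to %lu\n", freq);
}

/* Rough equivalent of the devfreq ->target() callback */
static void set_target(struct fake_devfreq *df, unsigned long freq)
{
	if (df->idle_freq) {
		df->idle_freq = freq;   /* just stash it, hw stays clamped */
		return;
	}
	apply_hw_freq(df, freq);
}

static void go_idle(struct fake_devfreq *df, unsigned long min_freq)
{
	df->idle_freq = df->hw_freq;    /* remember where the governor left us */
	apply_hw_freq(df, min_freq);    /* clamp while inactive */
}

static void go_active(struct fake_devfreq *df)
{
	unsigned long target = df->idle_freq;

	df->idle_freq = 0;
	if (target)
		apply_hw_freq(df, target);  /* restore the last requested rate */
}

int main(void)
{
	struct fake_devfreq df = { .hw_freq = 300000000 };

	set_target(&df, 500000000);     /* governor raises freq while active */
	go_idle(&df, 100000000);        /* clamp to minimum */
	set_target(&df, 800000000);     /* request while idle: only stashed */
	go_active(&df);                 /* stashed 800 MHz applied on wakeup */
	return 0;
}

From the governor's point of view nothing changes: get_freq() reports the shadow value while idle, so busyness sampling keeps working even though the hardware is clamped to its minimum rate.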
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index bdf5262ebd35..5a30d115525a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -755,7 +755,6 @@ nouveau_connector_force(struct drm_connector *connector) if (!nv_encoder) { NV_ERROR(drm, "can't find encoder to force %s on!\n", connector->name); - connector->status = connector_status_disconnected; return; } diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index abcac7db4347..c84f4b9dbb2c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -95,7 +95,7 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error) while (!list_empty(&fctx->pending)) { fence = list_entry(fctx->pending.next, typeof(*fence), head); - if (error) + if (error && !dma_fence_is_signaled_locked(&fence->base)) dma_fence_set_error(&fence->base, error); if (nouveau_fence_signal(fence)) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index fab542a758ff..a14728427ee4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -87,9 +87,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem) return; } - if (gem->import_attach) - drm_prime_gem_destroy(gem, nvbo->bo.sg); - ttm_bo_put(&nvbo->bo); pm_runtime_mark_last_busy(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index be6674fb1af7..d0bff13ac758 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -592,6 +592,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, unsigned long timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); struct mm_struct *mm = svmm->notifier.mm; + struct folio *folio; struct page *page; unsigned long start = args->p.addr; unsigned long notifier_seq; @@ -618,12 +619,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, ret = -EINVAL; goto out; } + folio = page_folio(page); mutex_lock(&svmm->mutex); if (!mmu_interval_read_retry(¬ifier->notifier, notifier_seq)) break; mutex_unlock(&svmm->mutex); + + folio_unlock(folio); + folio_put(folio); } /* Map the page on the GPU. */ @@ -639,8 +644,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm, ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL); mutex_unlock(&svmm->mutex); - unlock_page(page); - put_page(page); + folio_unlock(folio); + folio_put(folio); out: mmu_interval_notifier_remove(¬ifier->notifier); diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index 050ca7eafac5..556e0f9026be 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -139,21 +139,13 @@ static bool omapdss_device_is_connected(struct omap_dss_device *dssdev) } int omapdss_device_connect(struct dss_device *dss, - struct omap_dss_device *src, struct omap_dss_device *dst) { - dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n", - src ? dev_name(src->dev) : "NULL", + dev_dbg(&dss->pdev->dev, "connect(%s)\n", dst ? dev_name(dst->dev) : "NULL"); - if (!dst) { - /* - * The destination is NULL when the source is connected to a - * bridge instead of a DSS device. Stop here, we will attach - * the bridge later when we will have a DRM encoder. - */ - return src && src->bridge ? 
0 : -EINVAL; - } + if (!dst) + return -EINVAL; if (omapdss_device_is_connected(dst)) return -EBUSY; @@ -163,19 +155,14 @@ int omapdss_device_connect(struct dss_device *dss, return 0; } -void omapdss_device_disconnect(struct omap_dss_device *src, +void omapdss_device_disconnect(struct dss_device *dss, struct omap_dss_device *dst) { - struct dss_device *dss = src ? src->dss : dst->dss; - - dev_dbg(&dss->pdev->dev, "disconnect(%s, %s)\n", - src ? dev_name(src->dev) : "NULL", + dev_dbg(&dss->pdev->dev, "disconnect(%s)\n", dst ? dev_name(dst->dev) : "NULL"); - if (!dst) { - WARN_ON(!src->bridge); + if (WARN_ON(!dst)) return; - } if (!dst->id && !omapdss_device_is_connected(dst)) { WARN_ON(1); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index 040d5a3e33d6..4c22c09c93d5 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -242,9 +242,8 @@ struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev); void omapdss_device_put(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node); int omapdss_device_connect(struct dss_device *dss, - struct omap_dss_device *src, struct omap_dss_device *dst); -void omapdss_device_disconnect(struct omap_dss_device *src, +void omapdss_device_disconnect(struct dss_device *dss, struct omap_dss_device *dst); int omap_dss_get_num_overlay_managers(void); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index cc3bb0f6435b..b6e461a3f7a2 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -307,7 +307,7 @@ static void omap_disconnect_pipelines(struct drm_device *ddev) for (i = 0; i < priv->num_pipes; i++) { struct omap_drm_pipeline *pipe = &priv->pipes[i]; - omapdss_device_disconnect(NULL, pipe->output); + omapdss_device_disconnect(priv->dss, pipe->output); omapdss_device_put(pipe->output); pipe->output = NULL; @@ -325,7 +325,7 @@ static int omap_connect_pipelines(struct drm_device *ddev) int r; for_each_dss_output(output) { - r = omapdss_device_connect(priv->dss, NULL, output); + r = omapdss_device_connect(priv->dss, output); if (r == -EPROBE_DEFER) { omapdss_device_put(output); return r; diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index cf571796fd26..8fdb12ce2f88 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -1408,8 +1408,6 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, omap_obj = to_omap_bo(obj); - mutex_lock(&omap_obj->lock); - omap_obj->sgt = sgt; if (sgt->orig_nents == 1) { @@ -1424,21 +1422,17 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL); if (!pages) { omap_gem_free_object(obj); - obj = ERR_PTR(-ENOMEM); - goto done; + return ERR_PTR(-ENOMEM); } omap_obj->pages = pages; ret = drm_prime_sg_to_page_array(sgt, pages, npages); if (ret) { omap_gem_free_object(obj); - obj = ERR_PTR(-ENOMEM); - goto done; + return ERR_PTR(-ENOMEM); } } -done: - mutex_unlock(&omap_obj->lock); return obj; } diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 2c14779a39e8..1ef1b4c966d2 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1944,6 +1944,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('S', 'H', 'P', 0x1523, &sharp_lq140m1jw46.delay, 
"LQ140M1JW46"), EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"), + EDP_PANEL_ENTRY('S', 'T', 'A', 0x0004, &delay_200_500_e200, "116KHD024006"), EDP_PANEL_ENTRY('S', 'T', 'A', 0x0100, &delay_100_500_e200, "2081116HHD028001-51D"), { /* sentinal */ } diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c index ec2780be74d1..a92a7cd697f4 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c @@ -577,9 +577,9 @@ static int nt35950_probe(struct mipi_dsi_device *dsi) return dev_err_probe(dev, -EPROBE_DEFER, "Cannot get secondary DSI host\n"); nt->dsi[1] = mipi_dsi_device_register_full(dsi_r_host, info); - if (!nt->dsi[1]) { + if (IS_ERR(nt->dsi[1])) { dev_err(dev, "Cannot get secondary DSI node\n"); - return -ENODEV; + return PTR_ERR(nt->dsi[1]); } num_dsis++; } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 7b1750e79cbe..2b148593570d 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1252,27 +1252,28 @@ static const struct panel_desc auo_g070vvn01 = { }, }; -static const struct drm_display_mode auo_g101evn010_mode = { - .clock = 68930, - .hdisplay = 1280, - .hsync_start = 1280 + 82, - .hsync_end = 1280 + 82 + 2, - .htotal = 1280 + 82 + 2 + 84, - .vdisplay = 800, - .vsync_start = 800 + 8, - .vsync_end = 800 + 8 + 2, - .vtotal = 800 + 8 + 2 + 6, +static const struct display_timing auo_g101evn010_timing = { + .pixelclock = { 64000000, 68930000, 85000000 }, + .hactive = { 1280, 1280, 1280 }, + .hfront_porch = { 8, 64, 256 }, + .hback_porch = { 8, 64, 256 }, + .hsync_len = { 40, 168, 767 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 4, 8, 100 }, + .vback_porch = { 4, 8, 100 }, + .vsync_len = { 8, 16, 223 }, }; static const struct panel_desc auo_g101evn010 = { - .modes = &auo_g101evn010_mode, - .num_modes = 1, + .timings = &auo_g101evn010_timing, + .num_timings = 1, .bpc = 6, .size = { .width = 216, .height = 135, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, .connector_type = DRM_MODE_CONNECTOR_LVDS, }; @@ -4239,6 +4240,31 @@ static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; +static const struct drm_display_mode mchp_ac69t88a_mode = { + .clock = 25000, + .hdisplay = 800, + .hsync_start = 800 + 88, + .hsync_end = 800 + 88 + 5, + .htotal = 800 + 88 + 5 + 40, + .vdisplay = 480, + .vsync_start = 480 + 23, + .vsync_end = 480 + 23 + 5, + .vtotal = 480 + 23 + 5 + 1, +}; + +static const struct panel_desc mchp_ac69t88a = { + .modes = &mchp_ac69t88a_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 108, + .height = 65, + }, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode arm_rtsm_mode[] = { { .clock = 65000, @@ -4660,6 +4686,9 @@ static const struct of_device_id platform_of_match[] = { }, { .compatible = "yes-optoelectronics,ytc700tlag-05-201c", .data = &yes_optoelectronics_ytc700tlag_05_201c, + }, { + .compatible = "microchip,ac69t88a", + .data = &mchp_ac69t88a, #endif /* !CONFIG_DRM_PANEL_SIMPLE_OF_ONLY */ }, { /* Must be the last entry */ diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 40b631445992..62722d9ff8eb 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ 
b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -157,7 +157,6 @@ static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev) struct panfrost_model { const char *name; u32 id; - u32 id_mask; u64 features; u64 issues; struct { diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 621ff174dff3..0946a11835a4 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -359,7 +359,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev) return -1; } -static void r300_gpu_init(struct radeon_device *rdev) +/* rs400_gpu_init also calls this! */ +void r300_gpu_init(struct radeon_device *rdev) { uint32_t gb_tile_config, tmp; diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 6cf54a747749..780352f794e9 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -2104,7 +2104,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } - offset = radeon_get_ib_value(p, idx+1) << 8; + offset = (u64)radeon_get_ib_value(p, idx+1) << 8; if (offset != track->vgt_strmout_bo_offset[idx_value]) { DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n", offset, track->vgt_strmout_bo_offset[idx_value]); diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 1e00f6b99f94..8f5e07834fcc 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -165,6 +165,7 @@ void r200_set_safe_registers(struct radeon_device *rdev); */ extern int r300_init(struct radeon_device *rdev); extern void r300_fini(struct radeon_device *rdev); +extern void r300_gpu_init(struct radeon_device *rdev); extern int r300_suspend(struct radeon_device *rdev); extern int r300_resume(struct radeon_device *rdev); extern int r300_asic_reset(struct radeon_device *rdev, bool hard); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index ca4a36464340..cb01dc36b4b5 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -557,7 +557,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) { int session_idx = -1; bool destroyed = false, created = false, allocated = false; - uint32_t tmp, handle = 0; + uint32_t tmp = 0, handle = 0; uint32_t *size = &tmp; int i, r = 0; diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 6383f7a34bd8..921076292356 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -255,8 +255,22 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev) static void rs400_gpu_init(struct radeon_device *rdev) { - /* FIXME: is this correct ? */ - r420_pipes_init(rdev); + /* Earlier code was calling r420_pipes_init and then + * rs400_mc_wait_for_idle(rdev). The problem is that + * at least on my Mobility Radeon Xpress 200M RC410 card + * that ends up in this code path ends up num_gb_pipes == 3 + * while the card seems to have only one pipe. With the + * r420 pipe initialization method. + * + * Problems shown up as HyperZ glitches, see: + * https://bugs.freedesktop.org/show_bug.cgi?id=110897 + * + * Delegating initialization to r300 code seems to work + * and results in proper pipe numbers. The rs400 cards + * are said to be not r400, but r300 kind of cards. + */ + r300_gpu_init(rdev); + if (rs400_mc_wait_for_idle(rdev)) { pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. 
%08x\n", RREG32(RADEON_MC_STATUS)); diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c index a7f2b7f66a17..9ec9c43971df 100644 --- a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c +++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c @@ -396,7 +396,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, for (timeout = 10; timeout > 0; --timeout) { if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) && (rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) && - (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK)) + (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK_PHY)) break; usleep_range(1000, 2000); diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h index 2eaca54636f3..1f1eb46c721f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h +++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h @@ -141,7 +141,6 @@ #define CLOCKSET1 0x101c #define CLOCKSET1_LOCK_PHY (1 << 17) -#define CLOCKSET1_LOCK (1 << 16) #define CLOCKSET1_CLKSEL (1 << 8) #define CLOCKSET1_CLKINSEL_EXTAL (0 << 2) #define CLOCKSET1_CLKINSEL_DIG (1 << 2) diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index eef28921d41d..4de0c1b05663 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1070,9 +1070,6 @@ static void cdn_dp_pd_event_work(struct work_struct *work) { struct cdn_dp_device *dp = container_of(to_delayed_work(work), struct cdn_dp_device, event_work); - struct drm_connector *connector = &dp->connector; - enum drm_connector_status old_status; - int ret; mutex_lock(&dp->lock); @@ -1134,11 +1131,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work) out: mutex_unlock(&dp->lock); - - old_status = connector->status; - connector->status = connector->funcs->detect(connector, false); - if (old_status != connector->status) - drm_kms_helper_hotplug_event(dp->drm_dev); + drm_connector_helper_hpd_irq_event(&dp->connector); } static ssize_t cdn_dp_aux_transfer(struct drm_dp_aux *aux, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 8bd377c3fdcd..3fb7302a9ceb 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -275,6 +275,7 @@ struct rockchip_crtc_state { int output_flags; int data_map_mode; bool enable_afbc; + bool yuv_overlay; /** * @splice_mode: enabled when display a hdisplay > 4096 on rk3588 */ @@ -320,7 +321,6 @@ struct rockchip_crtc_state { u32 output_if_left_panel; u32 bus_format; u32 bus_flags; - int yuv_overlay; int post_r2y_en; int post_y2r_en; int post_csc_mode; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h index 32e6b90c8d1e..fe69bacc3390 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h @@ -405,6 +405,7 @@ enum dst_factor_mode { #define VOP2_COLOR_KEY_MASK BIT(31) #define RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD BIT(28) +#define RK3568_OVL_CTRL__YUV_MODE(vp) BIT(vp) #define RK3568_VP_BG_MIX_CTRL__BG_DLY GENMASK(31, 24) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h index 3143ecaaff86..f7f10e97ac05 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h +++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h @@ -21,7 +21,7 @@ * */ -#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#if 
!defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) #define _GPU_SCHED_TRACE_H_ #include @@ -106,7 +106,7 @@ TRACE_EVENT(drm_sched_job_wait_dep, __entry->seqno) ); -#endif +#endif /* _GPU_SCHED_TRACE_H_ */ /* This part must be outside protection */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index f138b3be1646..dbdd00c61315 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -595,6 +595,14 @@ int drm_sched_job_init(struct drm_sched_job *job, if (!entity->rq) return -ENOENT; + /* + * We don't know for sure how the user has allocated. Thus, zero the + * struct so that unallowed (i.e., too early) usage of pointers that + * this function does not set is guaranteed to lead to a NULL pointer + * exception instead of UB. + */ + memset(job, 0, sizeof(*job)); + job->entity = entity; job->s_fence = drm_sched_fence_alloc(entity, owner); if (!job->s_fence) diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile index f203ac5514ae..f778a4eee7c9 100644 --- a/drivers/gpu/drm/sti/Makefile +++ b/drivers/gpu/drm/sti/Makefile @@ -7,8 +7,6 @@ sti-drm-y := \ sti_compositor.o \ sti_crtc.o \ sti_plane.o \ - sti_crtc.o \ - sti_plane.o \ sti_hdmi.o \ sti_hdmi_tx3g4c28phy.o \ sti_dvo.o \ diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index db0a1eb53532..c59fcb4dca32 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -200,6 +200,9 @@ static int sti_cursor_atomic_check(struct drm_plane *drm_plane, return 0; crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + mode = &crtc_state->mode; dst_x = new_plane_state->crtc_x; dst_y = new_plane_state->crtc_y; diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 43c72c2604a0..f046f5f7ad25 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -638,6 +638,9 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, mixer = to_sti_mixer(crtc); crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + mode = &crtc_state->mode; dst_x = new_plane_state->crtc_x; dst_y = new_plane_state->crtc_y; diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 02b77279f6e4..adf9a25b2f1d 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1037,6 +1037,9 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane, return 0; crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + mode = &crtc_state->mode; dst_x = new_plane_state->crtc_x; dst_y = new_plane_state->crtc_y; diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c index 7e5f14646625..06c1b81912f7 100644 --- a/drivers/gpu/drm/sti/sti_mixer.c +++ b/drivers/gpu/drm/sti/sti_mixer.c @@ -137,7 +137,7 @@ static void mixer_dbg_crb(struct seq_file *s, int val) } } -static void mixer_dbg_mxn(struct seq_file *s, void *addr) +static void mixer_dbg_mxn(struct seq_file *s, void __iomem *addr) { int i; diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index 95b75236fe5e..38b2ae0d7ec1 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -599,7 +599,7 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask) { dispc_irq_t 
old_mask = dispc_k2g_read_irqenable(dispc); - /* clear the irqstatus for newly enabled irqs */ + /* clear the irqstatus for irqs that will be enabled */ dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask); dispc_k2g_vp_set_irqenable(dispc, 0, mask); @@ -607,6 +607,9 @@ void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask) dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7)); + /* clear the irqstatus for irqs that were disabled */ + dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & old_mask); + /* flush posted write */ dispc_k2g_read_irqenable(dispc); } @@ -679,24 +682,20 @@ static void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask) { unsigned int i; - u32 top_clear = 0; for (i = 0; i < dispc->feat->num_vps; ++i) { - if (clearmask & DSS_IRQ_VP_MASK(i)) { + if (clearmask & DSS_IRQ_VP_MASK(i)) dispc_k3_vp_write_irqstatus(dispc, i, clearmask); - top_clear |= BIT(i); - } } for (i = 0; i < dispc->feat->num_planes; ++i) { - if (clearmask & DSS_IRQ_PLANE_MASK(i)) { + if (clearmask & DSS_IRQ_PLANE_MASK(i)) dispc_k3_vid_write_irqstatus(dispc, i, clearmask); - top_clear |= BIT(4 + i); - } } if (dispc->feat->subrev == DISPC_K2G) return; - dispc_write(dispc, DISPC_IRQSTATUS, top_clear); + /* always clear the top level irqstatus */ + dispc_write(dispc, DISPC_IRQSTATUS, dispc_read(dispc, DISPC_IRQSTATUS)); /* Flush posted writes */ dispc_read(dispc, DISPC_IRQSTATUS); @@ -742,7 +741,7 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc, old_mask = dispc_k3_read_irqenable(dispc); - /* clear the irqstatus for newly enabled irqs */ + /* clear the irqstatus for irqs that will be enabled */ dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask); for (i = 0; i < dispc->feat->num_vps; ++i) { @@ -767,6 +766,9 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc, if (main_disable) dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable); + /* clear the irqstatus for irqs that were disabled */ + dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & old_mask); + /* Flush posted writes */ dispc_read(dispc, DISPC_IRQENABLE_SET); } @@ -2653,14 +2655,32 @@ static void dispc_init_errata(struct dispc_device *dispc) } } +/* + * K2G display controller does not support soft reset, so we do a basic manual + * reset here: make sure the IRQs are masked and VPs are disabled. 
+ */ +static void dispc_softreset_k2g(struct dispc_device *dispc) +{ + unsigned long flags; + + spin_lock_irqsave(&dispc->tidss->wait_lock, flags); + dispc_set_irqenable(dispc, 0); + dispc_read_and_clear_irqstatus(dispc); + spin_unlock_irqrestore(&dispc->tidss->wait_lock, flags); + + for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx) + VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0); +} + static int dispc_softreset(struct dispc_device *dispc) { u32 val; int ret = 0; - /* K2G display controller does not support soft reset */ - if (dispc->feat->subrev == DISPC_K2G) + if (dispc->feat->subrev == DISPC_K2G) { + dispc_softreset_k2g(dispc); return 0; + } /* Soft reset */ REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1); diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c index 0c681c7600bc..f13c7e434f8e 100644 --- a/drivers/gpu/drm/tidss/tidss_irq.c +++ b/drivers/gpu/drm/tidss/tidss_irq.c @@ -60,7 +60,9 @@ static irqreturn_t tidss_irq_handler(int irq, void *arg) unsigned int id; dispc_irq_t irqstatus; + spin_lock(&tidss->wait_lock); irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc); + spin_unlock(&tidss->wait_lock); for (id = 0; id < tidss->num_crtcs; id++) { struct drm_crtc *crtc = tidss->crtcs[id]; diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c index 7e2b0e224135..ebb9b1f782f9 100644 --- a/drivers/gpu/drm/tiny/repaper.c +++ b/drivers/gpu/drm/tiny/repaper.c @@ -455,7 +455,7 @@ static void repaper_frame_fixed_repeat(struct repaper_epd *epd, u8 fixed_value, enum repaper_stage stage) { u64 start = local_clock(); - u64 end = start + (epd->factored_stage_time * 1000 * 1000); + u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000); do { repaper_frame_fixed(epd, fixed_value, stage); @@ -466,7 +466,7 @@ static void repaper_frame_data_repeat(struct repaper_epd *epd, const u8 *image, const u8 *mask, enum repaper_stage stage) { u64 start = local_clock(); - u64 end = start + (epd->factored_stage_time * 1000 * 1000); + u64 end = start + ((u64)epd->factored_stage_time * 1000 * 1000); do { repaper_frame_data(epd, image, mask, stage); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index fa04e62202c1..dcb2c23dc6de 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -274,7 +274,13 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res, enum ttm_caching caching; man = ttm_manager_type(bo->bdev, res->mem_type); - caching = man->use_tt ? bo->ttm->caching : res->bus.caching; + if (man->use_tt) { + caching = bo->ttm->caching; + if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED) + tmp = pgprot_decrypted(tmp); + } else { + caching = res->bus.caching; + } return ttm_prot_from_caching(caching, tmp); } @@ -317,6 +323,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, .no_wait_gpu = false }; struct ttm_tt *ttm = bo->ttm; + struct ttm_resource_manager *man = + ttm_manager_type(bo->bdev, bo->resource->mem_type); pgprot_t prot; int ret; @@ -326,7 +334,8 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, if (ret) return ret; - if (num_pages == 1 && ttm->caching == ttm_cached) { + if (num_pages == 1 && ttm->caching == ttm_cached && + !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) { /* * We're mapping a single page, and the desired * page protection is consistent with the bo. 
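Editor's note: several hunks above add a (u64) cast before a multiply or shift of 32-bit values (the dpu_core_perf clock calculation, the r600_cs offset shift, and the repaper stage-time conversion). The short standalone program below shows why: without widening the first operand the arithmetic is performed in 32 bits and silently wraps. The concrete numbers are made up for illustration.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stage_time_ms = 5000;                 /* e.g. a long refresh stage */

	uint64_t wrong = stage_time_ms * 1000 * 1000;  /* 32-bit multiply, wraps */
	uint64_t right = (uint64_t)stage_time_ms * 1000 * 1000;

	printf("wrong: %llu ns\n", (unsigned long long)wrong);  /* 705032704 */
	printf("right: %llu ns\n", (unsigned long long)right);  /* 5000000000 */

	uint32_t ib_value = 0x02000000;                /* 32-bit dword from an IB */
	uint64_t offset = (uint64_t)ib_value << 8;     /* cast before the shift */
	printf("offset: 0x%llx\n", (unsigned long long)offset); /* 0x200000000 */
	return 0;
}

The cast must be on the first operand (or applied before the shift); casting the already-truncated 32-bit result would not help.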
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index d505603930a7..d3190aa66695 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -31,11 +31,13 @@ #define pr_fmt(fmt) "[TTM] " fmt +#include #include #include #include #include #include +#include #include #include "ttm_module.h" @@ -59,6 +61,7 @@ static atomic_long_t ttm_dma32_pages_allocated; int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) { struct ttm_device *bdev = bo->bdev; + struct drm_device *ddev = bo->base.dev; uint32_t page_flags = 0; dma_resv_assert_held(bo->base.resv); @@ -80,6 +83,15 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) pr_err("Illegal buffer object type\n"); return -EINVAL; } + /* + * When using dma_alloc_coherent with memory encryption the + * mapped TT pages need to be decrypted or otherwise the drivers + * will end up sending encrypted mem to the gpu. + */ + if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { + page_flags |= TTM_TT_FLAG_DECRYPTED; + drm_info_once(ddev, "TT memory decryption enabled."); + } bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags); if (unlikely(bo->ttm == NULL)) diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index e714d5318f30..b2d59a168697 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -102,7 +102,10 @@ v3d_irq(int irq, void *arg) to_v3d_fence(v3d->bin_job->base.irq_fence); trace_v3d_bcl_irq(&v3d->drm, fence->seqno); + + v3d->bin_job = NULL; dma_fence_signal(&fence->base); + status = IRQ_HANDLED; } @@ -111,7 +114,10 @@ v3d_irq(int irq, void *arg) to_v3d_fence(v3d->render_job->base.irq_fence); trace_v3d_rcl_irq(&v3d->drm, fence->seqno); + + v3d->render_job = NULL; dma_fence_signal(&fence->base); + status = IRQ_HANDLED; } @@ -120,7 +126,10 @@ v3d_irq(int irq, void *arg) to_v3d_fence(v3d->csd_job->base.irq_fence); trace_v3d_csd_irq(&v3d->drm, fence->seqno); + + v3d->csd_job = NULL; dma_fence_signal(&fence->base); + status = IRQ_HANDLED; } @@ -156,7 +165,10 @@ v3d_hub_irq(int irq, void *arg) to_v3d_fence(v3d->tfu_job->base.irq_fence); trace_v3d_tfu_irq(&v3d->drm, fence->seqno); + + v3d->tfu_job = NULL; dma_fence_signal(&fence->base); + status = IRQ_HANDLED; } diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c index 5a453532901f..166d4a88daee 100644 --- a/drivers/gpu/drm/v3d/v3d_mmu.c +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -34,32 +34,23 @@ static int v3d_mmu_flush_all(struct v3d_dev *v3d) { int ret; - /* Make sure that another flush isn't already running when we - * start this one. 
- */ - ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & - V3D_MMU_CTL_TLB_CLEARING), 100); - if (ret) - dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n"); + V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_FLUSH | + V3D_MMUC_CONTROL_ENABLE); + + ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & + V3D_MMUC_CONTROL_FLUSHING), 100); + if (ret) { + dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n"); + return ret; + } V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | V3D_MMU_CTL_TLB_CLEAR); - V3D_WRITE(V3D_MMUC_CONTROL, - V3D_MMUC_CONTROL_FLUSH | - V3D_MMUC_CONTROL_ENABLE); - ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & V3D_MMU_CTL_TLB_CLEARING), 100); - if (ret) { - dev_err(v3d->drm.dev, "TLB clear wait idle failed\n"); - return ret; - } - - ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & - V3D_MMUC_CONTROL_FLUSHING), 100); if (ret) - dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n"); + dev_err(v3d->drm.dev, "MMU TLB clear wait idle failed\n"); return ret; } diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index cb56627328bb..ec99d644cbb0 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -49,9 +49,9 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon) V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel); } + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask); V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask); V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask); - V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask); v3d->active_perfmon = perfmon; } @@ -175,6 +175,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, { struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_perfmon_destroy *req = data; + struct v3d_dev *v3d = v3d_priv->v3d; struct v3d_perfmon *perfmon; mutex_lock(&v3d_priv->perfmon.lock); @@ -184,6 +185,10 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, if (!perfmon) return -EINVAL; + /* If the active perfmon is being destroyed, stop it first */ + if (perfmon == v3d->active_perfmon) + v3d_perfmon_stop(v3d, perfmon, false); + v3d_perfmon_put(perfmon); return 0; diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 06238e6d7f5c..41493cf3d03b 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -179,11 +179,15 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) struct drm_device *dev = &v3d->drm; struct dma_fence *fence; + if (unlikely(job->base.base.s_fence->finished.error)) + return NULL; + + v3d->tfu_job = job; + fence = v3d_fence_create(v3d, V3D_TFU); if (IS_ERR(fence)) return NULL; - v3d->tfu_job = job; if (job->base.irq_fence) dma_fence_put(job->base.irq_fence); job->base.irq_fence = dma_fence_get(fence); @@ -217,6 +221,9 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) struct dma_fence *fence; int i; + if (unlikely(job->base.base.s_fence->finished.error)) + return NULL; + v3d->csd_job = job; v3d_invalidate_caches(v3d); @@ -282,11 +289,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) return DRM_GPU_SCHED_STAT_NOMINAL; } -/* If the current address or return address have changed, then the GPU - * has probably made progress and we should delay the reset. This - * could fail if the GPU got in an infinite loop in the CL, but that - * is pretty unlikely outside of an i-g-t testcase. 
- */ +static void +v3d_sched_skip_reset(struct drm_sched_job *sched_job) +{ + struct drm_gpu_scheduler *sched = sched_job->sched; + + spin_lock(&sched->job_list_lock); + list_add(&sched_job->list, &sched->pending_list); + spin_unlock(&sched->job_list_lock); +} + static enum drm_gpu_sched_stat v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, u32 *timedout_ctca, u32 *timedout_ctra) @@ -296,9 +308,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q)); u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q)); + /* If the current address or return address have changed, then the GPU + * has probably made progress and we should delay the reset. This + * could fail if the GPU got in an infinite loop in the CL, but that + * is pretty unlikely outside of an i-g-t testcase. + */ if (*timedout_ctca != ctca || *timedout_ctra != ctra) { *timedout_ctca = ctca; *timedout_ctra = ctra; + + v3d_sched_skip_reset(sched_job); return DRM_GPU_SCHED_STAT_NOMINAL; } @@ -338,11 +357,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job) struct v3d_dev *v3d = job->base.v3d; u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4); - /* If we've made progress, skip reset and let the timer get - * rearmed. + /* If we've made progress, skip reset, add the job to the pending + * list, and let the timer get rearmed. */ if (job->timedout_batches != batches) { job->timedout_batches = batches; + + v3d_sched_skip_reset(sched_job); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 418a8242691f..7700e8dfd5f2 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -323,6 +323,7 @@ struct vc4_hvs { struct platform_device *pdev; void __iomem *regs; u32 __iomem *dlist; + unsigned int dlist_mem_size; struct clk *core_clk; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 971801acbde6..3573db34a26b 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -170,6 +170,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) if (!drm_dev_enter(drm, &idx)) return -ENODEV; + WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); + drm_print_regset32(&p, &vc4_hdmi->hdmi_regset); drm_print_regset32(&p, &vc4_hdmi->hd_regset); drm_print_regset32(&p, &vc4_hdmi->cec_regset); @@ -179,6 +181,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) drm_print_regset32(&p, &vc4_hdmi->ram_regset); drm_print_regset32(&p, &vc4_hdmi->rm_regset); + pm_runtime_put(&vc4_hdmi->pdev->dev); + drm_dev_exit(idx); return 0; @@ -2156,7 +2160,7 @@ static int vc4_hdmi_audio_startup(struct device *dev, void *data) } if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) { - ret = -ENODEV; + ret = -ENOTSUPP; goto out_dev_exit; } diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 47990ecbfc4d..88442cb65915 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -108,7 +108,8 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_hvs *hvs = vc4->hvs; struct drm_printer p = drm_seq_file_printer(m); - unsigned int next_entry_start = 0; + unsigned int dlist_mem_size = hvs->dlist_mem_size; + unsigned int next_entry_start; unsigned int i, j; u32 dlist_word, dispstat; @@ -122,8 +123,9 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data) } drm_printf(&p, "HVS chan %u:\n", i); + next_entry_start 
= 0; - for (j = HVS_READ(SCALER_DISPLISTX(i)); j < 256; j++) { + for (j = HVS_READ(SCALER_DISPLISTX(i)); j < dlist_mem_size; j++) { dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j); drm_printf(&p, "dlist: %02d: 0x%08x\n", j, dlist_word); @@ -220,6 +222,9 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs, if (!drm_dev_enter(drm, &idx)) return; + if (hvs->vc4->is_vc5) + return; + /* The LUT memory is laid out with each HVS channel in order, * each of which takes 256 writes for R, 256 for G, then 256 * for B. @@ -413,13 +418,11 @@ void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan) if (!drm_dev_enter(drm, &idx)) return; - if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE) + if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)) goto out; - HVS_WRITE(SCALER_DISPCTRLX(chan), - HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET); - HVS_WRITE(SCALER_DISPCTRLX(chan), - HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE); + HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET); + HVS_WRITE(SCALER_DISPCTRLX(chan), 0); /* Once we leave, the scaler should be disabled and its fifo empty. */ WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET); @@ -823,9 +826,10 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) * our 16K), since we don't want to scramble the screen when * transitioning from the firmware's boot setup to runtime. */ + hvs->dlist_mem_size = (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END; drm_mm_init(&hvs->dlist_mm, HVS_BOOTLOADER_DLIST_END, - (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END); + hvs->dlist_mem_size); /* Set up the HVS LBM memory manager. We could have some more * complicated data structure that allowed reuse of LBM areas @@ -906,6 +910,17 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) SCALER_DISPCTRL_SCLEIRQ); + /* Set AXI panic mode. + * VC4 panics when < 2 lines in FIFO. + * VC5 panics when less than 1 line in the FIFO. + */ + dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK | + SCALER_DISPCTRL_PANIC1_MASK | + SCALER_DISPCTRL_PANIC2_MASK); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2); + /* Set AXI panic mode. * VC4 panics when < 2 lines in FIFO. * VC5 panics when less than 1 line in the FIFO. 
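Editor's note: the v3d_sched.c hunks above change the timeout handlers so that a job which is still making forward progress is put back on the scheduler's pending list instead of triggering a GPU reset. The sketch below is a user-space model of that decision logic only; the types and names (fake_job, fake_sched, job_timedout) are invented for illustration and do not mirror the drm_sched API.

#include <stdbool.h>
#include <stdio.h>

struct fake_job {
	const char *name;
	unsigned int last_seen_progress;   /* progress counter at last timeout */
	struct fake_job *next;
};

struct fake_sched {
	struct fake_job *pending;          /* singly linked pending list */
};

static void requeue(struct fake_sched *sched, struct fake_job *job)
{
	job->next = sched->pending;
	sched->pending = job;
}

/* Returns true if a full GPU reset is required. */
static bool job_timedout(struct fake_sched *sched, struct fake_job *job,
			 unsigned int hw_progress)
{
	if (job->last_seen_progress != hw_progress) {
		/* Still making progress: remember the new counter, put the
		 * job back on the pending list and let the timer rearm. */
		job->last_seen_progress = hw_progress;
		requeue(sched, job);
		return false;
	}
	return true;   /* genuinely stuck, reset the GPU */
}

int main(void)
{
	struct fake_sched sched = { 0 };
	struct fake_job job = { .name = "bin", .last_seen_progress = 10 };

	printf("first timeout:  reset=%d\n", job_timedout(&sched, &job, 42));
	printf("second timeout: reset=%d\n", job_timedout(&sched, &job, 42));
	return 0;
}

The key difference from the previous code is the explicit requeue: returning "no reset" without re-adding the job would otherwise leave it off the pending list, which is what the added v3d_sched_skip_reset() helper addresses.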
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 9b98470593b0..20a418f64533 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -190,6 +190,13 @@ struct virtio_gpu_framebuffer { #define to_virtio_gpu_framebuffer(x) \ container_of(x, struct virtio_gpu_framebuffer, base) +struct virtio_gpu_plane_state { + struct drm_plane_state base; + struct virtio_gpu_fence *fence; +}; +#define to_virtio_gpu_plane_state(x) \ + container_of(x, struct virtio_gpu_plane_state, base) + struct virtio_gpu_queue { struct virtqueue *vq; spinlock_t qlock; diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 4c09e313bebc..0c073ba4974f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -66,11 +66,28 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc) return format; } +static struct +drm_plane_state *virtio_gpu_plane_duplicate_state(struct drm_plane *plane) +{ + struct virtio_gpu_plane_state *new; + + if (WARN_ON(!plane->state)) + return NULL; + + new = kzalloc(sizeof(*new), GFP_KERNEL); + if (!new) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, &new->base); + + return &new->base; +} + static const struct drm_plane_funcs virtio_gpu_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .reset = drm_atomic_helper_plane_reset, - .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_duplicate_state = virtio_gpu_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, }; @@ -128,11 +145,13 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_plane_state *vgplane_st; struct virtio_gpu_object *bo; vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + vgplane_st = to_virtio_gpu_plane_state(plane->state); bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); - if (vgfb->fence) { + if (vgplane_st->fence) { struct virtio_gpu_object_array *objs; objs = virtio_gpu_array_alloc(1); @@ -141,13 +160,11 @@ static void virtio_gpu_resource_flush(struct drm_plane *plane, virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); virtio_gpu_array_lock_resv(objs); virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y, - width, height, objs, vgfb->fence); + width, height, objs, + vgplane_st->fence); virtio_gpu_notify(vgdev); - - dma_fence_wait_timeout(&vgfb->fence->f, true, + dma_fence_wait_timeout(&vgplane_st->fence->f, true, msecs_to_jiffies(50)); - dma_fence_put(&vgfb->fence->f); - vgfb->fence = NULL; } else { virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y, width, height, NULL, NULL); @@ -237,20 +254,23 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_plane_state *vgplane_st; struct virtio_gpu_object *bo; if (!new_state->fb) return 0; vgfb = to_virtio_gpu_framebuffer(new_state->fb); + vgplane_st = to_virtio_gpu_plane_state(new_state); bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob)) return 0; - if (bo->dumb && (plane->state->fb != new_state->fb)) { - vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, + if 
(bo->dumb) { + vgplane_st->fence = virtio_gpu_fence_alloc(vgdev, + vgdev->fence_drv.context, 0); - if (!vgfb->fence) + if (!vgplane_st->fence) return -ENOMEM; } @@ -260,15 +280,15 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane, static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *state) { - struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_plane_state *vgplane_st; if (!state->fb) return; - vgfb = to_virtio_gpu_framebuffer(state->fb); - if (vgfb->fence) { - dma_fence_put(&vgfb->fence->f); - vgfb->fence = NULL; + vgplane_st = to_virtio_gpu_plane_state(state); + if (vgplane_st->fence) { + dma_fence_put(&vgplane_st->fence->f); + vgplane_st->fence = NULL; } } @@ -281,6 +301,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_output *output = NULL; struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_plane_state *vgplane_st; struct virtio_gpu_object *bo = NULL; uint32_t handle; @@ -293,6 +314,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, if (plane->state->fb) { vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + vgplane_st = to_virtio_gpu_plane_state(plane->state); bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); handle = bo->hw_res_handle; } else { @@ -312,11 +334,9 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, (vgdev, 0, plane->state->crtc_w, plane->state->crtc_h, - 0, 0, objs, vgfb->fence); + 0, 0, objs, vgplane_st->fence); virtio_gpu_notify(vgdev); - dma_fence_wait(&vgfb->fence->f, true); - dma_fence_put(&vgfb->fence->f); - vgfb->fence = NULL; + dma_fence_wait(&vgplane_st->fence->f, true); } if (plane->state->fb != old_state->fb) { diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index f716c5796f5f..09025ff3b196 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -226,17 +226,19 @@ static int __init vkms_init(void) if (!config) return -ENOMEM; - default_config = config; - config->cursor = enable_cursor; config->writeback = enable_writeback; config->overlay = enable_overlay; ret = vkms_create(config); - if (ret) + if (ret) { kfree(config); + return ret; + } - return ret; + default_config = config; + + return 0; } static void vkms_destroy(struct vkms_config *config) @@ -260,9 +262,10 @@ static void vkms_destroy(struct vkms_config *config) static void __exit vkms_exit(void) { - if (default_config->dev) - vkms_destroy(default_config); + if (!default_config) + return; + vkms_destroy(default_config); kfree(default_config); } diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index fcaa958d841c..f34d4ec1d92f 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -205,6 +205,8 @@ static int zynqmp_dpsub_probe(struct platform_device *pdev) if (ret) return ret; + dma_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32)); + /* Try the reserved memory. Proceed if there's none. 
*/ of_reserved_mem_device_init(&pdev->dev); diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile index e8014c1a2f8b..86195e9c59c4 100644 --- a/drivers/hid/Makefile +++ b/drivers/hid/Makefile @@ -159,7 +159,6 @@ obj-$(CONFIG_USB_KBD) += usbhid/ obj-$(CONFIG_I2C_HID_CORE) += i2c-hid/ obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/ -obj-$(INTEL_ISH_FIRMWARE_DOWNLOADER) += intel-ish-hid/ obj-$(CONFIG_AMD_SFH_HID) += amd-sfh-hid/ diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c index 37b2ce9b50fe..746b2abfc8fd 100644 --- a/drivers/hid/hid-apple.c +++ b/drivers/hid/hid-apple.c @@ -339,6 +339,12 @@ static bool apple_is_non_apple_keyboard(struct hid_device *hdev) return false; } +static bool apple_is_omoton_kb066(struct hid_device *hdev) +{ + return hdev->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI && + strcmp(hdev->name, "Bluetooth Keyboard") == 0; +} + static inline void apple_setup_key_translation(struct input_dev *input, const struct apple_key_translation *table) { @@ -425,6 +431,7 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input, hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2015) table = magic_keyboard_2015_fn_keys; else if (hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 || + hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 || hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 || hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021) table = apple2021_fn_keys; @@ -675,7 +682,7 @@ static int apple_input_configured(struct hid_device *hdev, { struct apple_sc *asc = hid_get_drvdata(hdev); - if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) { + if (((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) || apple_is_omoton_kb066(hdev)) { hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n"); asc->quirks &= ~APPLE_HAS_FN; } @@ -1030,6 +1037,10 @@ static const struct hid_device_id apple_devices[] = { .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY }, { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021), .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024), + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY }, + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024), + .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021), .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY }, { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021), diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c index 8deded185725..c45e5aa569d2 100644 --- a/drivers/hid/hid-appleir.c +++ b/drivers/hid/hid-appleir.c @@ -188,7 +188,7 @@ static int appleir_raw_event(struct hid_device *hid, struct hid_report *report, static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 }; unsigned long flags; - if (len != 5) + if (len != 5 || !(hid->claimed & HID_CLAIMED_INPUT)) goto out; if (!memcmp(data, keydown, sizeof(keydown))) { diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 2c1b292588f9..617adcc5c41a 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1129,6 +1129,8 @@ static void hid_apply_multiplier(struct hid_device *hid, while (multiplier_collection->parent_idx != -1 && multiplier_collection->type != 
HID_COLLECTION_LOGICAL) multiplier_collection = &hid->collection[multiplier_collection->parent_idx]; + if (multiplier_collection->type != HID_COLLECTION_LOGICAL) + multiplier_collection = NULL; effective_multiplier = hid_calculate_multiplier(hid, multiplier); diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c index c6bdb9c4ef3e..d25291ed900d 100644 --- a/drivers/hid/hid-google-hammer.c +++ b/drivers/hid/hid-google-hammer.c @@ -269,11 +269,13 @@ static int cbas_ec_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_ACPI static const struct acpi_device_id cbas_ec_acpi_ids[] = { { "GOOG000B", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids); +#endif #ifdef CONFIG_OF static const struct of_device_id cbas_ec_of_match[] = { diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index f1c106f5e90b..e078d2ac92c8 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -41,6 +41,10 @@ #define USB_VENDOR_ID_ACTIONSTAR 0x2101 #define USB_DEVICE_ID_ACTIONSTAR_1011 0x1011 +#define USB_VENDOR_ID_ADATA_XPG 0x125f +#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE 0x7505 +#define USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE 0x7506 + #define USB_VENDOR_ID_ADS_TECH 0x06e1 #define USB_DEVICE_ID_ADS_TECH_RADIO_SI470X 0xa155 @@ -94,6 +98,7 @@ #define USB_DEVICE_ID_APPLE_MAGICMOUSE2 0x0269 #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e #define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265 +#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC 0x0324 #define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e #define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f #define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214 @@ -183,6 +188,7 @@ #define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242 #define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c +#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 0x0320 #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a #define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f #define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102 @@ -506,7 +512,6 @@ #define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PIT_E100 0xe100 #define I2C_VENDOR_ID_GOODIX 0x27c6 -#define I2C_DEVICE_ID_GOODIX_01E0 0x01e0 #define I2C_DEVICE_ID_GOODIX_01E8 0x01e8 #define I2C_DEVICE_ID_GOODIX_01E9 0x01e9 #define I2C_DEVICE_ID_GOODIX_01F0 0x01f0 @@ -1072,6 +1077,7 @@ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 +#define USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473 0x5473 #define I2C_VENDOR_ID_RAYDIUM 0x2386 #define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33 diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index c9c968d4b36a..9bb8daf7f786 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -224,7 +224,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda touch_minor = tdata[4]; state = tdata[7] & TOUCH_STATE_MASK; down = state != TOUCH_STATE_NONE; - } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) { + } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || + input->id.product == + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) { id = tdata[8] & 0xf; x = (tdata[1] << 27 | tdata[0] << 19) >> 19; y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19); @@ -256,8 +258,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda /* If requested, emulate a scroll wheel by detecting small * vertical touch 
motions. */ - if (emulate_scroll_wheel && (input->id.product != - USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)) { + if (emulate_scroll_wheel && + input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && + input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) { unsigned long now = jiffies; int step_x = msc->touches[id].scroll_x - x; int step_y = msc->touches[id].scroll_y - y; @@ -356,7 +359,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda input_report_abs(input, ABS_MT_POSITION_X, x); input_report_abs(input, ABS_MT_POSITION_Y, y); - if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) + if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || + input->id.product == + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) input_report_abs(input, ABS_MT_PRESSURE, pressure); if (report_undeciphered) { @@ -364,7 +369,9 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) input_event(input, EV_MSC, MSC_RAW, tdata[7]); else if (input->id.product != - USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && + input->id.product != + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) input_event(input, EV_MSC, MSC_RAW, tdata[8]); } } @@ -490,7 +497,9 @@ static int magicmouse_raw_event(struct hid_device *hdev, magicmouse_emit_buttons(msc, clicks & 3); input_report_rel(input, REL_X, x); input_report_rel(input, REL_Y, y); - } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) { + } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || + input->id.product == + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) { input_mt_sync_frame(input); input_report_key(input, BTN_MOUSE, clicks & 1); } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ @@ -542,7 +551,9 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd __set_bit(REL_WHEEL_HI_RES, input->relbit); __set_bit(REL_HWHEEL_HI_RES, input->relbit); } - } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) { + } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || + input->id.product == + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) { /* If the trackpad has been connected to a Mac, the name is * automatically personalized, e.g., "José Expósito's Trackpad". 
* When connected through Bluetooth, the personalized name is @@ -618,7 +629,9 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd MOUSE_RES_X); input_abs_set_res(input, ABS_MT_POSITION_Y, MOUSE_RES_Y); - } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) { + } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || + input->id.product == + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) { input_set_abs_params(input, ABS_MT_PRESSURE, 0, 253, 0, 0); input_set_abs_params(input, ABS_PRESSURE, 0, 253, 0, 0); input_set_abs_params(input, ABS_MT_ORIENTATION, -3, 4, 0, 0); @@ -657,7 +670,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd input_set_events_per_packet(input, 60); if (report_undeciphered && - input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) { + input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && + input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) { __set_bit(EV_MSC, input->evbit); __set_bit(MSC_RAW, input->mscbit); } @@ -682,7 +696,9 @@ static int magicmouse_input_mapping(struct hid_device *hdev, /* Magic Trackpad does not give relative data after switching to MT */ if ((hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD || - hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) && + hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || + hi->input->id.product == + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) && field->flags & HID_MAIN_ITEM_RELATIVE) return -1; @@ -718,7 +734,8 @@ static int magicmouse_enable_multitouch(struct hid_device *hdev) int ret; int feature_size; - if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) { + if (hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || + hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) { if (hdev->vendor == BT_VENDOR_ID_APPLE) { feature_size = sizeof(feature_mt_trackpad2_bt); feature = feature_mt_trackpad2_bt; @@ -763,7 +780,8 @@ static int magicmouse_fetch_battery(struct hid_device *hdev) if (!hdev->battery || hdev->vendor != USB_VENDOR_ID_APPLE || (hdev->product != USB_DEVICE_ID_APPLE_MAGICMOUSE2 && - hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)) + hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && + hdev->product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC)) return -1; report_enum = &hdev->report_enum[hdev->battery_report_type]; @@ -832,7 +850,9 @@ static int magicmouse_probe(struct hid_device *hdev, if (id->vendor == USB_VENDOR_ID_APPLE && (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 || - (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 && hdev->type != HID_TYPE_USBMOUSE))) + ((id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || + id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) && + hdev->type != HID_TYPE_USBMOUSE))) return 0; if (!msc->input) { @@ -847,7 +867,8 @@ static int magicmouse_probe(struct hid_device *hdev, else if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2) report = hid_register_report(hdev, HID_INPUT_REPORT, MOUSE2_REPORT_ID, 0); - else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) { + else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || + id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) { if (id->vendor == BT_VENDOR_ID_APPLE) report = hid_register_report(hdev, HID_INPUT_REPORT, TRACKPAD2_BT_REPORT_ID, 0); @@ -917,7 +938,8 @@ static __u8 *magicmouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, */ if (hdev->vendor == USB_VENDOR_ID_APPLE && (hdev->product == USB_DEVICE_ID_APPLE_MAGICMOUSE2 || - hdev->product == 
USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) && + hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 || + hdev->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC) && *rsize == 83 && rdesc[46] == 0x84 && rdesc[58] == 0x85) { hid_info(hdev, "fixing up magicmouse battery report descriptor\n"); @@ -948,6 +970,10 @@ static const struct hid_device_id magic_mice[] = { USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 }, + { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 }, + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_MAGICTRACKPAD2_USBC), .driver_data = 0 }, { } }; MODULE_DEVICE_TABLE(hid, magic_mice); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index bf9cad711259..6386043aab0b 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1447,8 +1447,7 @@ static __u8 *mt_report_fixup(struct hid_device *hdev, __u8 *rdesc, { if (hdev->vendor == I2C_VENDOR_ID_GOODIX && (hdev->product == I2C_DEVICE_ID_GOODIX_01E8 || - hdev->product == I2C_DEVICE_ID_GOODIX_01E9 || - hdev->product == I2C_DEVICE_ID_GOODIX_01E0)) { + hdev->product == I2C_DEVICE_ID_GOODIX_01E9)) { if (rdesc[607] == 0x15) { rdesc[607] = 0x25; dev_info( @@ -1669,9 +1668,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi) break; } - if (suffix) + if (suffix) { hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name, suffix); + if (!hi->input->name) + return -ENOMEM; + } return 0; } @@ -2074,9 +2076,6 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01E9) }, - { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT_NSMU, - HID_DEVICE(BUS_I2C, HID_GROUP_ANY, I2C_VENDOR_ID_GOODIX, - I2C_DEVICE_ID_GOODIX_01E0) }, /* GoodTouch panels */ { .driver_data = MT_CLS_NSMU, diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c index 25cfd964dc25..acb9eb18f7cc 100644 --- a/drivers/hid/hid-plantronics.c +++ b/drivers/hid/hid-plantronics.c @@ -6,9 +6,6 @@ * Copyright (c) 2015-2018 Terry Junge */ -/* - */ - #include "hid-ids.h" #include @@ -23,30 +20,28 @@ #define PLT_VOL_UP 0x00b1 #define PLT_VOL_DOWN 0x00b2 +#define PLT_MIC_MUTE 0x00b5 #define PLT1_VOL_UP (PLT_HID_1_0_PAGE | PLT_VOL_UP) #define PLT1_VOL_DOWN (PLT_HID_1_0_PAGE | PLT_VOL_DOWN) +#define PLT1_MIC_MUTE (PLT_HID_1_0_PAGE | PLT_MIC_MUTE) #define PLT2_VOL_UP (PLT_HID_2_0_PAGE | PLT_VOL_UP) #define PLT2_VOL_DOWN (PLT_HID_2_0_PAGE | PLT_VOL_DOWN) +#define PLT2_MIC_MUTE (PLT_HID_2_0_PAGE | PLT_MIC_MUTE) +#define HID_TELEPHONY_MUTE (HID_UP_TELEPHONY | 0x2f) +#define HID_CONSUMER_MUTE (HID_UP_CONSUMER | 0xe2) #define PLT_DA60 0xda60 #define PLT_BT300_MIN 0x0413 #define PLT_BT300_MAX 0x0418 - -#define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \ - (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) - -#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0) -#define PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS BIT(1) - #define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */ -#define PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT 220 /* ms */ struct plt_drv_data { unsigned long device_type; - unsigned long last_volume_key_ts; - u32 quirks; + unsigned long last_key_ts; + unsigned long double_key_to; + __u16 last_key; }; static int plantronics_input_mapping(struct hid_device *hdev, @@ -58,34 +53,43 @@ static int 
plantronics_input_mapping(struct hid_device *hdev, unsigned short mapped_key; struct plt_drv_data *drv_data = hid_get_drvdata(hdev); unsigned long plt_type = drv_data->device_type; + int allow_mute = usage->hid == HID_TELEPHONY_MUTE; + int allow_consumer = field->application == HID_CP_CONSUMERCONTROL && + (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER && + usage->hid != HID_CONSUMER_MUTE; /* special case for PTT products */ if (field->application == HID_GD_JOYSTICK) goto defaulted; - /* handle volume up/down mapping */ /* non-standard types or multi-HID interfaces - plt_type is PID */ if (!(plt_type & HID_USAGE_PAGE)) { switch (plt_type) { case PLT_DA60: - if (PLT_ALLOW_CONSUMER) + if (allow_consumer) goto defaulted; - goto ignored; + if (usage->hid == HID_CONSUMER_MUTE) { + mapped_key = KEY_MICMUTE; + goto mapped; + } + break; default: - if (PLT_ALLOW_CONSUMER) + if (allow_consumer || allow_mute) goto defaulted; } + goto ignored; } - /* handle standard types - plt_type is 0xffa0uuuu or 0xffa2uuuu */ - /* 'basic telephony compliant' - allow default consumer page map */ - else if ((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY && - (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) { - if (PLT_ALLOW_CONSUMER) - goto defaulted; - } - /* not 'basic telephony' - apply legacy mapping */ - /* only map if the field is in the device's primary vendor page */ - else if (!((field->application ^ plt_type) & HID_USAGE_PAGE)) { + + /* handle standard consumer control mapping */ + /* and standard telephony mic mute mapping */ + if (allow_consumer || allow_mute) + goto defaulted; + + /* handle vendor unique types - plt_type is 0xffa0uuuu or 0xffa2uuuu */ + /* if not 'basic telephony compliant' - map vendor unique controls */ + if (!((plt_type & HID_USAGE) >= PLT_BASIC_TELEPHONY && + (plt_type & HID_USAGE) != PLT_BASIC_EXCEPTION) && + !((field->application ^ plt_type) & HID_USAGE_PAGE)) switch (usage->hid) { case PLT1_VOL_UP: case PLT2_VOL_UP: @@ -95,8 +99,11 @@ static int plantronics_input_mapping(struct hid_device *hdev, case PLT2_VOL_DOWN: mapped_key = KEY_VOLUMEDOWN; goto mapped; + case PLT1_MIC_MUTE: + case PLT2_MIC_MUTE: + mapped_key = KEY_MICMUTE; + goto mapped; } - } /* * Future mapping of call control or other usages, @@ -105,6 +112,8 @@ static int plantronics_input_mapping(struct hid_device *hdev, */ ignored: + hid_dbg(hdev, "usage: %08x (appl: %08x) - ignored\n", + usage->hid, field->application); return -1; defaulted: @@ -123,38 +132,26 @@ static int plantronics_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct plt_drv_data *drv_data = hid_get_drvdata(hdev); + unsigned long prev_tsto, cur_ts; + __u16 prev_key, cur_key; - if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) { - unsigned long prev_ts, cur_ts; + /* Usages are filtered in plantronics_usages. */ - /* Usages are filtered in plantronics_usages. */ + /* HZ too low for ms resolution - double key detection disabled */ + /* or it is a key release - handle key presses only. */ + if (!drv_data->double_key_to || !value) + return 0; - if (!value) /* Handle key presses only. */ - return 0; + prev_tsto = drv_data->last_key_ts + drv_data->double_key_to; + cur_ts = drv_data->last_key_ts = jiffies; + prev_key = drv_data->last_key; + cur_key = drv_data->last_key = usage->code; - prev_ts = drv_data->last_volume_key_ts; - cur_ts = jiffies; - if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT) - return 1; /* Ignore the repeated key. 
*/ - - drv_data->last_volume_key_ts = cur_ts; + /* If the same key occurs in <= double_key_to -- ignore it */ + if (prev_key == cur_key && time_before_eq(cur_ts, prev_tsto)) { + hid_dbg(hdev, "double key %d ignored\n", cur_key); + return 1; /* Ignore the repeated key. */ } - if (drv_data->quirks & PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS) { - unsigned long prev_ts, cur_ts; - - /* Usages are filtered in plantronics_usages. */ - - if (!value) /* Handle key presses only. */ - return 0; - - prev_ts = drv_data->last_volume_key_ts; - cur_ts = jiffies; - if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_FOLLOWED_OPPOSITE_KEY_TIMEOUT) - return 1; /* Ignore the followed opposite volume key. */ - - drv_data->last_volume_key_ts = cur_ts; - } - return 0; } @@ -196,12 +193,16 @@ static int plantronics_probe(struct hid_device *hdev, ret = hid_parse(hdev); if (ret) { hid_err(hdev, "parse failed\n"); - goto err; + return ret; } drv_data->device_type = plantronics_device_type(hdev); - drv_data->quirks = id->driver_data; - drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT); + drv_data->double_key_to = msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT); + drv_data->last_key_ts = jiffies - drv_data->double_key_to; + + /* if HZ does not allow ms resolution - disable double key detection */ + if (drv_data->double_key_to < PLT_DOUBLE_KEY_TIMEOUT) + drv_data->double_key_to = 0; hid_set_drvdata(hdev, drv_data); @@ -210,29 +211,10 @@ static int plantronics_probe(struct hid_device *hdev, if (ret) hid_err(hdev, "hw start failed\n"); -err: return ret; } static const struct hid_device_id plantronics_devices[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3210_SERIES), - .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES), - .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3215_SERIES), - .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3225_SERIES), - .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS }, - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3325_SERIES), - .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS }, - { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, - USB_DEVICE_ID_PLANTRONICS_ENCOREPRO_500_SERIES), - .driver_data = PLT_QUIRK_FOLLOWED_OPPOSITE_VOLUME_KEYS }, { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, { } }; @@ -241,6 +223,14 @@ MODULE_DEVICE_TABLE(hid, plantronics_devices); static const struct hid_usage_id plantronics_usages[] = { { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID }, { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID }, + { HID_TELEPHONY_MUTE, EV_KEY, HID_ANY_ID }, + { HID_CONSUMER_MUTE, EV_KEY, HID_ANY_ID }, + { PLT2_VOL_UP, EV_KEY, HID_ANY_ID }, + { PLT2_VOL_DOWN, EV_KEY, HID_ANY_ID }, + { PLT2_MIC_MUTE, EV_KEY, HID_ANY_ID }, + { PLT1_VOL_UP, EV_KEY, HID_ANY_ID }, + { PLT1_VOL_DOWN, EV_KEY, HID_ANY_ID }, + { PLT1_MIC_MUTE, EV_KEY, HID_ANY_ID }, { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR } }; diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index debc49272a5c..d8c5c7d451ef 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -27,6 +27,8 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_GAMEPAD), HID_QUIRK_BADPAD }, { 
HID_USB_DEVICE(USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR), HID_QUIRK_BADPAD }, + { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE), HID_QUIRK_ALWAYS_POLL }, + { HID_USB_DEVICE(USB_VENDOR_ID_ADATA_XPG, USB_VENDOR_ID_ADATA_XPG_WL_GAMING_MOUSE_DONGLE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016), HID_QUIRK_FULLSPEED_INTERVAL }, { HID_USB_DEVICE(USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS), HID_QUIRK_NOGET }, { HID_USB_DEVICE(USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX), HID_QUIRK_NO_INIT_REPORTS }, @@ -882,6 +884,7 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) }, #endif { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, + { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) }, { } }; diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 6abd3e2a9094..3eeac7011739 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -728,23 +728,30 @@ err_stop_hw: return ret; } +static int sensor_hub_finalize_pending_fn(struct device *dev, void *data) +{ + struct hid_sensor_hub_device *hsdev = dev->platform_data; + + if (hsdev->pending.status) + complete(&hsdev->pending.ready); + + return 0; +} + static void sensor_hub_remove(struct hid_device *hdev) { struct sensor_hub_data *data = hid_get_drvdata(hdev); unsigned long flags; - int i; hid_dbg(hdev, " hardware removed\n"); hid_hw_close(hdev); hid_hw_stop(hdev); + spin_lock_irqsave(&data->lock, flags); - for (i = 0; i < data->hid_sensor_client_cnt; ++i) { - struct hid_sensor_hub_device *hsdev = - data->hid_sensor_hub_client_devs[i].platform_data; - if (hsdev->pending.status) - complete(&hsdev->pending.ready); - } + device_for_each_child(&hdev->dev, NULL, + sensor_hub_finalize_pending_fn); spin_unlock_irqrestore(&data->lock, flags); + mfd_remove_devices(&hdev->dev); mutex_destroy(&data->mutex); } diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c index cf1679b0d4fb..0bf70664c35e 100644 --- a/drivers/hid/hid-thrustmaster.c +++ b/drivers/hid/hid-thrustmaster.c @@ -170,6 +170,15 @@ static void thrustmaster_interrupts(struct hid_device *hdev) ep = &usbif->cur_altsetting->endpoint[1]; b_ep = ep->desc.bEndpointAddress; + /* Are the expected endpoints present? 
*/ + u8 ep_addr[2] = {b_ep, 0}; + + if (!usb_check_int_endpoints(usbif, ep_addr)) { + kfree(send_buf); + hid_err(hdev, "Unexpected non-int endpoint\n"); + return; + } + for (i = 0; i < ARRAY_SIZE(setup_arr); ++i) { memcpy(send_buf, setup_arr[i], setup_arr_sizes[i]); diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c index 39114d5c55a0..5b35f9f321d4 100644 --- a/drivers/hid/hid-uclogic-core.c +++ b/drivers/hid/hid-uclogic-core.c @@ -142,11 +142,12 @@ static int uclogic_input_configured(struct hid_device *hdev, suffix = "System Control"; break; } - } - - if (suffix) + } else { hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s %s", hdev->name, suffix); + if (!hi->input->name) + return -ENOMEM; + } return 0; } diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c index 0b05bb1e4410..8a7ac016b1ab 100644 --- a/drivers/hid/i2c-hid/i2c-hid-core.c +++ b/drivers/hid/i2c-hid/i2c-hid-core.c @@ -261,7 +261,7 @@ static int i2c_hid_get_report(struct i2c_hid *ihid, ihid->rawbuf, recv_len + sizeof(__le16)); if (error) { dev_err(&ihid->client->dev, - "failed to set a report to device: %d\n", error); + "failed to get a report from device: %d\n", error); return error; } diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c index dd5fc60874ba..42141a78bdb4 100644 --- a/drivers/hid/intel-ish-hid/ipc/ipc.c +++ b/drivers/hid/intel-ish-hid/ipc/ipc.c @@ -517,6 +517,10 @@ static int ish_fw_reset_handler(struct ishtp_device *dev) /* ISH FW is dead */ if (!ish_is_input_ready(dev)) return -EPIPE; + + /* Send clock sync at once after reset */ + ishtp_dev->prev_sync = 0; + /* * Set HOST2ISH.ILUP. Apparently we need this BEFORE sending * RESET_NOTIFY_ACK - FW will be checking for it @@ -576,15 +580,14 @@ static void fw_reset_work_fn(struct work_struct *unused) */ static void _ish_sync_fw_clock(struct ishtp_device *dev) { - static unsigned long prev_sync; - uint64_t usec; + struct ipc_time_update_msg time = {}; - if (prev_sync && time_before(jiffies, prev_sync + 20 * HZ)) + if (dev->prev_sync && time_before(jiffies, dev->prev_sync + 20 * HZ)) return; - prev_sync = jiffies; - usec = ktime_to_us(ktime_get_boottime()); - ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &usec, sizeof(uint64_t)); + dev->prev_sync = jiffies; + /* The fields of time would be updated while sending message */ + ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &time, sizeof(time)); } /** diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c index 14c271d7d8a9..0377dac3fc9a 100644 --- a/drivers/hid/intel-ish-hid/ishtp-hid.c +++ b/drivers/hid/intel-ish-hid/ishtp-hid.c @@ -261,12 +261,14 @@ err_hid_data: */ void ishtp_hid_remove(struct ishtp_cl_data *client_data) { + void *data; int i; for (i = 0; i < client_data->num_hid_devices; ++i) { if (client_data->hid_sensor_hubs[i]) { - kfree(client_data->hid_sensor_hubs[i]->driver_data); + data = client_data->hid_sensor_hubs[i]->driver_data; hid_destroy_device(client_data->hid_sensor_hubs[i]); + kfree(data); client_data->hid_sensor_hubs[i] = NULL; } } diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h index 32142c7d9a04..9b2ee3fe04b8 100644 --- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h +++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h @@ -212,6 +212,8 @@ struct ishtp_device { unsigned int ipc_tx_cnt; unsigned long long ipc_tx_bytes_cnt; + /* Time of the last clock sync */ + unsigned long prev_sync; const struct ishtp_hw_ops *ops; 
size_t mtu; uint32_t ishtp_msg_hdr; diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c index 3b4ee21cd811..26cb331b646c 100644 --- a/drivers/hid/usbhid/hid-pidff.c +++ b/drivers/hid/usbhid/hid-pidff.c @@ -21,6 +21,7 @@ #include "usbhid.h" #define PID_EFFECTS_MAX 64 +#define PID_INFINITE 0xffff /* Report usage table used to put reports into an array */ @@ -261,10 +262,22 @@ static void pidff_set_envelope_report(struct pidff_device *pidff, static int pidff_needs_set_envelope(struct ff_envelope *envelope, struct ff_envelope *old) { - return envelope->attack_level != old->attack_level || - envelope->fade_level != old->fade_level || + bool needs_new_envelope; + needs_new_envelope = envelope->attack_level != 0 || + envelope->fade_level != 0 || + envelope->attack_length != 0 || + envelope->fade_length != 0; + + if (!needs_new_envelope) + return false; + + if (!old) + return needs_new_envelope; + + return envelope->attack_level != old->attack_level || + envelope->fade_level != old->fade_level || envelope->attack_length != old->attack_length || - envelope->fade_length != old->fade_length; + envelope->fade_length != old->fade_length; } /* @@ -301,7 +314,12 @@ static void pidff_set_effect_report(struct pidff_device *pidff, pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; pidff->set_effect_type->value[0] = pidff->create_new_effect_type->value[0]; - pidff->set_effect[PID_DURATION].value[0] = effect->replay.length; + + /* Convert infinite length from Linux API (0) + to PID standard (NULL) if needed */ + pidff->set_effect[PID_DURATION].value[0] = + effect->replay.length == 0 ? PID_INFINITE : effect->replay.length; + pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = effect->trigger.button; pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = effect->trigger.interval; @@ -574,11 +592,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect, pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_constant(effect, old)) pidff_set_constant_force_report(pidff, effect); - if (!old || - pidff_needs_set_envelope(&effect->u.constant.envelope, - &old->u.constant.envelope)) - pidff_set_envelope_report(pidff, - &effect->u.constant.envelope); + if (pidff_needs_set_envelope(&effect->u.constant.envelope, + old ? &old->u.constant.envelope : NULL)) + pidff_set_envelope_report(pidff, &effect->u.constant.envelope); break; case FF_PERIODIC: @@ -613,11 +629,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect, pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_periodic(effect, old)) pidff_set_periodic_report(pidff, effect); - if (!old || - pidff_needs_set_envelope(&effect->u.periodic.envelope, - &old->u.periodic.envelope)) - pidff_set_envelope_report(pidff, - &effect->u.periodic.envelope); + if (pidff_needs_set_envelope(&effect->u.periodic.envelope, + old ? &old->u.periodic.envelope : NULL)) + pidff_set_envelope_report(pidff, &effect->u.periodic.envelope); break; case FF_RAMP: @@ -631,11 +645,9 @@ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect, pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_ramp(effect, old)) pidff_set_ramp_force_report(pidff, effect); - if (!old || - pidff_needs_set_envelope(&effect->u.ramp.envelope, - &old->u.ramp.envelope)) - pidff_set_envelope_report(pidff, - &effect->u.ramp.envelope); + if (pidff_needs_set_envelope(&effect->u.ramp.envelope, + old ? 
&old->u.ramp.envelope : NULL)) + pidff_set_envelope_report(pidff, &effect->u.ramp.envelope); break; case FF_SPRING: @@ -758,6 +770,11 @@ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude) static int pidff_find_fields(struct pidff_usage *usage, const u8 *table, struct hid_report *report, int count, int strict) { + if (!report) { + pr_debug("pidff_find_fields, null report\n"); + return -1; + } + int i, j, k, found; for (k = 0; k < count; k++) { @@ -871,6 +888,11 @@ static int pidff_reports_ok(struct pidff_device *pidff) static struct hid_field *pidff_find_special_field(struct hid_report *report, int usage, int enforce_min) { + if (!report) { + pr_debug("pidff_find_special_field, null report\n"); + return NULL; + } + int i; for (i = 0; i < report->maxfield; i++) { diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c index c439ed2f16db..af6bc76dbf64 100644 --- a/drivers/hid/usbhid/usbkbd.c +++ b/drivers/hid/usbhid/usbkbd.c @@ -160,7 +160,7 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type, return -1; spin_lock_irqsave(&kbd->leds_lock, flags); - kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) | + kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 4) | (!!test_bit(LED_COMPOSE, dev->led) << 3) | (!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) | (!!test_bit(LED_NUML, dev->led)); diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 12d4c28741d7..b3395b3eb804 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -2234,7 +2234,8 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix) if (hid_is_usb(wacom->hdev)) { struct usb_interface *intf = to_usb_interface(wacom->hdev->dev.parent); struct usb_device *dev = interface_to_usbdev(intf); - product_name = dev->product; + if (dev->product != NULL) + product_name = dev->product; } if (wacom->hdev->bus == BUS_I2C) { diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 245a6e9631a0..ce54b8354a7d 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -1396,9 +1396,9 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) rotation -= 1800; input_report_abs(pen_input, ABS_TILT_X, - (char)frame[7]); + (signed char)frame[7]); input_report_abs(pen_input, ABS_TILT_Y, - (char)frame[8]); + (signed char)frame[8]); input_report_abs(pen_input, ABS_Z, rotation); input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11])); @@ -4914,6 +4914,10 @@ static const struct wacom_features wacom_features_0x94 = HID_DEVICE(BUS_I2C, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ .driver_data = (kernel_ulong_t)&wacom_features_##prod +#define PCI_DEVICE_WACOM(prod) \ + HID_DEVICE(BUS_PCI, HID_GROUP_WACOM, USB_VENDOR_ID_WACOM, prod),\ + .driver_data = (kernel_ulong_t)&wacom_features_##prod + #define USB_DEVICE_LENOVO(prod) \ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \ .driver_data = (kernel_ulong_t)&wacom_features_##prod @@ -5083,6 +5087,7 @@ const struct hid_device_id wacom_ids[] = { { USB_DEVICE_WACOM(HID_ANY_ID) }, { I2C_DEVICE_WACOM(HID_ANY_ID) }, + { PCI_DEVICE_WACOM(HID_ANY_ID) }, { BT_DEVICE_WACOM(HID_ANY_ID) }, { } }; diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 274ad8443f8c..b3f1b7746eab 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -403,6 +403,7 @@ static void ssip_reset(struct hsi_client *cl) del_timer(&ssi->rx_wd); del_timer(&ssi->tx_wd); 
del_timer(&ssi->keep_alive); + cancel_work_sync(&ssi->work); ssi->main_state = 0; ssi->send_state = 0; ssi->recv_state = 0; diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index 47e1bd8de9fc..53026356475a 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -1113,68 +1113,10 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer, EXPORT_SYMBOL(vmbus_sendpacket); /* - * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer - * packets using a GPADL Direct packet type. This interface allows you - * to control notifying the host. This will be useful for sending - * batched data. Also the sender can control the send flags - * explicitly. - */ -int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, - struct hv_page_buffer pagebuffers[], - u32 pagecount, void *buffer, u32 bufferlen, - u64 requestid) -{ - int i; - struct vmbus_channel_packet_page_buffer desc; - u32 descsize; - u32 packetlen; - u32 packetlen_aligned; - struct kvec bufferlist[3]; - u64 aligned_data = 0; - - if (pagecount > MAX_PAGE_BUFFER_COUNT) - return -EINVAL; - - /* - * Adjust the size down since vmbus_channel_packet_page_buffer is the - * largest size we support - */ - descsize = sizeof(struct vmbus_channel_packet_page_buffer) - - ((MAX_PAGE_BUFFER_COUNT - pagecount) * - sizeof(struct hv_page_buffer)); - packetlen = descsize + bufferlen; - packetlen_aligned = ALIGN(packetlen, sizeof(u64)); - - /* Setup the descriptor */ - desc.type = VM_PKT_DATA_USING_GPA_DIRECT; - desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; - desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */ - desc.length8 = (u16)(packetlen_aligned >> 3); - desc.transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */ - desc.reserved = 0; - desc.rangecount = pagecount; - - for (i = 0; i < pagecount; i++) { - desc.range[i].len = pagebuffers[i].len; - desc.range[i].offset = pagebuffers[i].offset; - desc.range[i].pfn = pagebuffers[i].pfn; - } - - bufferlist[0].iov_base = &desc; - bufferlist[0].iov_len = descsize; - bufferlist[1].iov_base = buffer; - bufferlist[1].iov_len = bufferlen; - bufferlist[2].iov_base = &aligned_data; - bufferlist[2].iov_len = (packetlen_aligned - packetlen); - - return hv_ringbuffer_write(channel, bufferlist, 3, requestid, NULL); -} -EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer); - -/* - * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet + * vmbus_sendpacket_mpb_desc - Send one or more multi-page buffer packets * using a GPADL Direct packet type. - * The buffer includes the vmbus descriptor. + * The desc argument must include space for the VMBus descriptor. The + * rangecount field must already be set. 
*/ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, struct vmbus_packet_mpb_array *desc, @@ -1196,7 +1138,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, desc->length8 = (u16)(packetlen_aligned >> 3); desc->transactionid = VMBUS_RQST_ERROR; /* will be updated in hv_ringbuffer_write() */ desc->reserved = 0; - desc->rangecount = 1; bufferlist[0].iov_base = desc; bufferlist[0].iov_len = desc_size; diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index d35b60c06114..77017d951826 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -767,6 +767,12 @@ hv_kvp_init(struct hv_util_service *srv) */ kvp_transaction.state = HVUTIL_DEVICE_INIT; + return 0; +} + +int +hv_kvp_init_transport(void) +{ hvt = hvutil_transport_init(kvp_devname, CN_KVP_IDX, CN_KVP_VAL, kvp_on_msg, kvp_on_reset); if (!hvt) diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c index 0d2184be1691..397f4c8fa46c 100644 --- a/drivers/hv/hv_snapshot.c +++ b/drivers/hv/hv_snapshot.c @@ -388,6 +388,12 @@ hv_vss_init(struct hv_util_service *srv) */ vss_transaction.state = HVUTIL_DEVICE_INIT; + return 0; +} + +int +hv_vss_init_transport(void) +{ hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL, vss_on_msg, vss_on_reset); if (!hvt) { diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 835e6039c186..ad6c066fd2b7 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -141,6 +141,7 @@ static struct hv_util_service util_heartbeat = { static struct hv_util_service util_kvp = { .util_cb = hv_kvp_onchannelcallback, .util_init = hv_kvp_init, + .util_init_transport = hv_kvp_init_transport, .util_pre_suspend = hv_kvp_pre_suspend, .util_pre_resume = hv_kvp_pre_resume, .util_deinit = hv_kvp_deinit, @@ -149,6 +150,7 @@ static struct hv_util_service util_kvp = { static struct hv_util_service util_vss = { .util_cb = hv_vss_onchannelcallback, .util_init = hv_vss_init, + .util_init_transport = hv_vss_init_transport, .util_pre_suspend = hv_vss_pre_suspend, .util_pre_resume = hv_vss_pre_resume, .util_deinit = hv_vss_deinit, @@ -592,6 +594,13 @@ static int util_probe(struct hv_device *dev, if (ret) goto error; + if (srv->util_init_transport) { + ret = srv->util_init_transport(); + if (ret) { + vmbus_close(dev->channel); + goto error; + } + } return 0; error: diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index dc673edf053c..ab12e21bf5fc 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -365,12 +365,14 @@ void vmbus_on_event(unsigned long data); void vmbus_on_msg_dpc(unsigned long data); int hv_kvp_init(struct hv_util_service *srv); +int hv_kvp_init_transport(void); void hv_kvp_deinit(void); int hv_kvp_pre_suspend(void); int hv_kvp_pre_resume(void); void hv_kvp_onchannelcallback(void *context); int hv_vss_init(struct hv_util_service *srv); +int hv_vss_init_transport(void); void hv_vss_deinit(void); int hv_vss_pre_suspend(void); int hv_vss_pre_resume(void); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index a2191bc5c153..dfbfdbf9cbd7 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -2409,12 +2409,25 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size) struct resource *iter; mutex_lock(&hyperv_mmio_lock); + + /* + * If all bytes of the MMIO range to be released are within the + * special case fb_mmio shadow region, skip releasing the shadow + * region since no corresponding __request_region() was done + * in vmbus_allocate_mmio(). 
+ */ + if (fb_mmio && start >= fb_mmio->start && + (start + size - 1 <= fb_mmio->end)) + goto skip_shadow_release; + for (iter = hyperv_mmio; iter; iter = iter->sibling) { if ((iter->start >= start + size) || (iter->end <= start)) continue; __release_region(iter, start, size); } + +skip_shadow_release: release_mem_region(start, size); mutex_unlock(&hyperv_mmio_lock); diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c index 7802bbf5f958..59424103f634 100644 --- a/drivers/hwmon/ad7314.c +++ b/drivers/hwmon/ad7314.c @@ -22,11 +22,13 @@ */ #define AD7314_TEMP_MASK 0x7FE0 #define AD7314_TEMP_SHIFT 5 +#define AD7314_LEADING_ZEROS_MASK BIT(15) /* * ADT7301 and ADT7302 temperature masks */ #define ADT7301_TEMP_MASK 0x3FFF +#define ADT7301_LEADING_ZEROS_MASK (BIT(15) | BIT(14)) enum ad7314_variant { adt7301, @@ -65,12 +67,20 @@ static ssize_t ad7314_temperature_show(struct device *dev, return ret; switch (spi_get_device_id(chip->spi_dev)->driver_data) { case ad7314: + if (ret & AD7314_LEADING_ZEROS_MASK) { + /* Invalid read-out, leading zero part is missing */ + return -EIO; + } data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT; data = sign_extend32(data, 9); return sprintf(buf, "%d\n", 250 * data); case adt7301: case adt7302: + if (ret & ADT7301_LEADING_ZEROS_MASK) { + /* Invalid read-out, leading zero part is missing */ + return -EIO; + } /* * Documented as a 13 bit twos complement register * with a sign bit - which is a 14 bit 2's complement diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c index 1572b5416015..dbcb8f362061 100644 --- a/drivers/hwmon/dell-smm-hwmon.c +++ b/drivers/hwmon/dell-smm-hwmon.c @@ -67,7 +67,7 @@ #define I8K_POWER_BATTERY 0x01 #define DELL_SMM_NO_TEMP 10 -#define DELL_SMM_NO_FANS 3 +#define DELL_SMM_NO_FANS 4 struct dell_smm_data { struct mutex i8k_mutex; /* lock for sensors writes */ @@ -940,11 +940,14 @@ static const struct hwmon_channel_info *dell_smm_info[] = { HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX | HWMON_F_TARGET, HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX | + HWMON_F_TARGET, + HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX | HWMON_F_TARGET ), HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_ENABLE, HWMON_PWM_INPUT, + HWMON_PWM_INPUT, HWMON_PWM_INPUT ), NULL diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index ba408942dbe7..f1926b9171e0 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -392,7 +392,12 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev, if (state >= fan_data->num_speed) return -EINVAL; + mutex_lock(&fan_data->lock); + set_fan_speed(fan_data, state); + + mutex_unlock(&fan_data->lock); + return 0; } @@ -488,7 +493,11 @@ MODULE_DEVICE_TABLE(of, of_gpio_fan_match); static void gpio_fan_stop(void *data) { + struct gpio_fan_data *fan_data = data; + + mutex_lock(&fan_data->lock); set_fan_speed(data, 0); + mutex_unlock(&fan_data->lock); } static int gpio_fan_probe(struct platform_device *pdev) @@ -561,7 +570,9 @@ static int gpio_fan_suspend(struct device *dev) if (fan_data->gpios) { fan_data->resume_speed = fan_data->speed_index; + mutex_lock(&fan_data->lock); set_fan_speed(fan_data, 0); + mutex_unlock(&fan_data->lock); } return 0; @@ -571,8 +582,11 @@ static int gpio_fan_resume(struct device *dev) { struct gpio_fan_data *fan_data = dev_get_drvdata(dev); - if (fan_data->gpios) + if (fan_data->gpios) { + mutex_lock(&fan_data->lock); set_fan_speed(fan_data, fan_data->resume_speed); + 
mutex_unlock(&fan_data->lock); + } return 0; } diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c index 83e424945b59..ec3ff4e9a9ab 100644 --- a/drivers/hwmon/nct6775-core.c +++ b/drivers/hwmon/nct6775-core.c @@ -276,8 +276,8 @@ static const s8 NCT6776_BEEP_BITS[] = { static const u16 NCT6776_REG_TOLERANCE_H[] = { 0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c, 0xb0c }; -static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0 }; -static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 }; +static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0, 0 }; +static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0, 0 }; static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; @@ -2787,8 +2787,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr, if (err < 0) return err; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, - data->target_temp_mask); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, data->target_temp_mask * 1000), 1000); mutex_lock(&data->update_lock); data->target_temp[nr] = val; @@ -2868,7 +2867,7 @@ store_temp_tolerance(struct device *dev, struct device_attribute *attr, return err; /* Limit tolerance as needed */ - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, data->tolerance_mask); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, data->tolerance_mask * 1000), 1000); mutex_lock(&data->update_lock); data->temp_tolerance[index][nr] = val; @@ -2994,7 +2993,7 @@ store_weight_temp(struct device *dev, struct device_attribute *attr, if (err < 0) return err; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 255); + val = DIV_ROUND_CLOSEST(clamp_val(val, 0, 255000), 1000); mutex_lock(&data->update_lock); data->weight_temp[index][nr] = val; diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 4efbacce5d0c..6bb1900b021c 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -181,40 +181,40 @@ static const struct ntc_compensation ncpXXwf104[] = { }; static const struct ntc_compensation ncpXXxh103[] = { - { .temp_c = -40, .ohm = 247565 }, - { .temp_c = -35, .ohm = 181742 }, - { .temp_c = -30, .ohm = 135128 }, - { .temp_c = -25, .ohm = 101678 }, - { .temp_c = -20, .ohm = 77373 }, - { .temp_c = -15, .ohm = 59504 }, - { .temp_c = -10, .ohm = 46222 }, - { .temp_c = -5, .ohm = 36244 }, - { .temp_c = 0, .ohm = 28674 }, - { .temp_c = 5, .ohm = 22878 }, - { .temp_c = 10, .ohm = 18399 }, - { .temp_c = 15, .ohm = 14910 }, - { .temp_c = 20, .ohm = 12169 }, + { .temp_c = -40, .ohm = 195652 }, + { .temp_c = -35, .ohm = 148171 }, + { .temp_c = -30, .ohm = 113347 }, + { .temp_c = -25, .ohm = 87559 }, + { .temp_c = -20, .ohm = 68237 }, + { .temp_c = -15, .ohm = 53650 }, + { .temp_c = -10, .ohm = 42506 }, + { .temp_c = -5, .ohm = 33892 }, + { .temp_c = 0, .ohm = 27219 }, + { .temp_c = 5, .ohm = 22021 }, + { .temp_c = 10, .ohm = 17926 }, + { .temp_c = 15, .ohm = 14674 }, + { .temp_c = 20, .ohm = 12081 }, { .temp_c = 25, .ohm = 10000 }, - { .temp_c = 30, .ohm = 8271 }, - { .temp_c = 35, .ohm = 6883 }, - { .temp_c = 40, .ohm = 5762 }, - { .temp_c = 45, .ohm = 4851 }, - { .temp_c = 50, .ohm = 4105 }, - { .temp_c = 55, .ohm = 3492 }, - { .temp_c = 60, .ohm = 2985 }, - { .temp_c = 65, .ohm = 2563 }, - { .temp_c = 70, .ohm = 2211 }, - { .temp_c = 75, .ohm = 1915 }, - { .temp_c = 80, .ohm = 1666 }, - { .temp_c = 85, .ohm = 1454 }, - { .temp_c = 90, .ohm = 1275 }, - { .temp_c = 95, .ohm = 1121 }, - { .temp_c = 100, .ohm = 990 }, - { .temp_c = 105, .ohm = 876 }, - { 
.temp_c = 110, .ohm = 779 }, - { .temp_c = 115, .ohm = 694 }, - { .temp_c = 120, .ohm = 620 }, - { .temp_c = 125, .ohm = 556 }, + { .temp_c = 30, .ohm = 8315 }, + { .temp_c = 35, .ohm = 6948 }, + { .temp_c = 40, .ohm = 5834 }, + { .temp_c = 45, .ohm = 4917 }, + { .temp_c = 50, .ohm = 4161 }, + { .temp_c = 55, .ohm = 3535 }, + { .temp_c = 60, .ohm = 3014 }, + { .temp_c = 65, .ohm = 2586 }, + { .temp_c = 70, .ohm = 2228 }, + { .temp_c = 75, .ohm = 1925 }, + { .temp_c = 80, .ohm = 1669 }, + { .temp_c = 85, .ohm = 1452 }, + { .temp_c = 90, .ohm = 1268 }, + { .temp_c = 95, .ohm = 1110 }, + { .temp_c = 100, .ohm = 974 }, + { .temp_c = 105, .ohm = 858 }, + { .temp_c = 110, .ohm = 758 }, + { .temp_c = 115, .ohm = 672 }, + { .temp_c = 120, .ohm = 596 }, + { .temp_c = 125, .ohm = 531 }, }; /* diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c index d0d386990af5..6366610a9082 100644 --- a/drivers/hwmon/pmbus/pmbus.c +++ b/drivers/hwmon/pmbus/pmbus.c @@ -103,6 +103,8 @@ static int pmbus_identify(struct i2c_client *client, if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) { int page; + info->pages = PMBUS_PAGES; + for (page = 1; page < PMBUS_PAGES; page++) { if (pmbus_set_page(client, page, 0xff) < 0) break; diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c index b9a93ee9c236..cbe29c8a9b18 100644 --- a/drivers/hwmon/tmp513.c +++ b/drivers/hwmon/tmp513.c @@ -19,15 +19,20 @@ * the Free Software Foundation; version 2 of the License. */ +#include +#include +#include #include #include #include #include -#include +#include #include +#include #include #include -#include +#include +#include // Common register definition #define TMP51X_SHUNT_CONFIG 0x00 @@ -100,8 +105,8 @@ #define TMP51X_REMOTE_TEMP_LIMIT_2_POS 8 #define TMP513_REMOTE_TEMP_LIMIT_3_POS 7 -#define TMP51X_VBUS_RANGE_32V 32000000 -#define TMP51X_VBUS_RANGE_16V 16000000 +#define TMP51X_VBUS_RANGE_32V (32 * MICRO) +#define TMP51X_VBUS_RANGE_16V (16 * MICRO) // Max and Min value #define MAX_BUS_VOLTAGE_32_LIMIT 32764 @@ -173,7 +178,7 @@ struct tmp51x_data { struct regmap *regmap; }; -// Set the shift based on the gain 8=4, 4=3, 2=2, 1=1 +// Set the shift based on the gain: 8 -> 1, 4 -> 2, 2 -> 3, 1 -> 4 static inline u8 tmp51x_get_pga_shift(struct tmp51x_data *data) { return 5 - ffs(data->pga_gain); @@ -195,8 +200,11 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos, * 2's complement number shifted by one to four depending * on the pga gain setting. 1lsb = 10uV */ - *val = sign_extend32(regval, 17 - tmp51x_get_pga_shift(data)); - *val = DIV_ROUND_CLOSEST(*val * 10000, data->shunt_uohms); + *val = sign_extend32(regval, + reg == TMP51X_SHUNT_CURRENT_RESULT ? 
+ 16 - tmp51x_get_pga_shift(data) : 15); + *val = DIV_ROUND_CLOSEST(*val * 10 * (long)MILLI, (long)data->shunt_uohms); + break; case TMP51X_BUS_VOLTAGE_RESULT: case TMP51X_BUS_VOLTAGE_H_LIMIT: @@ -211,8 +219,8 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos, break; case TMP51X_BUS_CURRENT_RESULT: // Current = (ShuntVoltage * CalibrationRegister) / 4096 - *val = sign_extend32(regval, 16) * data->curr_lsb_ua; - *val = DIV_ROUND_CLOSEST(*val, 1000); + *val = sign_extend32(regval, 15) * (long)data->curr_lsb_ua; + *val = DIV_ROUND_CLOSEST(*val, (long)MILLI); break; case TMP51X_LOCAL_TEMP_RESULT: case TMP51X_REMOTE_TEMP_RESULT_1: @@ -223,7 +231,7 @@ static int tmp51x_get_value(struct tmp51x_data *data, u8 reg, u8 pos, case TMP51X_REMOTE_TEMP_LIMIT_2: case TMP513_REMOTE_TEMP_LIMIT_3: // 1lsb = 0.0625 degrees centigrade - *val = sign_extend32(regval, 16) >> TMP51X_TEMP_SHIFT; + *val = sign_extend32(regval, 15) >> TMP51X_TEMP_SHIFT; *val = DIV_ROUND_CLOSEST(*val * 625, 10); break; case TMP51X_N_FACTOR_AND_HYST_1: @@ -252,7 +260,7 @@ static int tmp51x_set_value(struct tmp51x_data *data, u8 reg, long val) * The user enter current value and we convert it to * voltage. 1lsb = 10uV */ - val = DIV_ROUND_CLOSEST(val * data->shunt_uohms, 10000); + val = DIV_ROUND_CLOSEST(val * (long)data->shunt_uohms, 10 * (long)MILLI); max_val = U16_MAX >> tmp51x_get_pga_shift(data); regval = clamp_val(val, -max_val, max_val); break; @@ -542,18 +550,16 @@ static int tmp51x_calibrate(struct tmp51x_data *data) if (data->shunt_uohms == 0) return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION, 0); - max_curr_ma = DIV_ROUND_CLOSEST_ULL(vshunt_max * 1000 * 1000, - data->shunt_uohms); + max_curr_ma = DIV_ROUND_CLOSEST_ULL(vshunt_max * MICRO, data->shunt_uohms); /* * Calculate the minimal bit resolution for the current and the power. * Those values will be used during register interpretation. */ - data->curr_lsb_ua = DIV_ROUND_CLOSEST_ULL(max_curr_ma * 1000, 32767); + data->curr_lsb_ua = DIV_ROUND_CLOSEST_ULL(max_curr_ma * MILLI, 32767); data->pwr_lsb_uw = 20 * data->curr_lsb_ua; - div = DIV_ROUND_CLOSEST_ULL(data->curr_lsb_ua * data->shunt_uohms, - 1000 * 1000); + div = DIV_ROUND_CLOSEST_ULL(data->curr_lsb_ua * data->shunt_uohms, MICRO); return regmap_write(data->regmap, TMP51X_SHUNT_CALIBRATION, DIV_ROUND_CLOSEST(40960, div)); @@ -628,9 +634,9 @@ static int tmp51x_vbus_range_to_reg(struct device *dev, } else if (data->vbus_range_uvolt == TMP51X_VBUS_RANGE_16V) { data->shunt_config &= ~TMP51X_BUS_VOLTAGE_MASK; } else { - dev_err(dev, "ti,bus-range-microvolt is invalid: %u\n", - data->vbus_range_uvolt); - return -EINVAL; + return dev_err_probe(dev, -EINVAL, + "ti,bus-range-microvolt is invalid: %u\n", + data->vbus_range_uvolt); } return 0; } @@ -646,8 +652,8 @@ static int tmp51x_pga_gain_to_reg(struct device *dev, struct tmp51x_data *data) } else if (data->pga_gain == 1) { data->shunt_config |= CURRENT_SENSE_VOLTAGE_40_MASK; } else { - dev_err(dev, "ti,pga-gain is invalid: %u\n", data->pga_gain); - return -EINVAL; + return dev_err_probe(dev, -EINVAL, + "ti,pga-gain is invalid: %u\n", data->pga_gain); } return 0; } @@ -679,10 +685,10 @@ static int tmp51x_read_properties(struct device *dev, struct tmp51x_data *data) memcpy(data->nfactor, nfactor, (data->id == tmp513) ? 
3 : 2); // Check if shunt value is compatible with pga-gain - if (data->shunt_uohms > data->pga_gain * 40 * 1000 * 1000) { - dev_err(dev, "shunt-resistor: %u too big for pga_gain: %u\n", - data->shunt_uohms, data->pga_gain); - return -EINVAL; + if (data->shunt_uohms > data->pga_gain * 40 * MICRO) { + return dev_err_probe(dev, -EINVAL, + "shunt-resistor: %u too big for pga_gain: %u\n", + data->shunt_uohms, data->pga_gain); } return 0; @@ -726,22 +732,17 @@ static int tmp51x_probe(struct i2c_client *client) data->id = i2c_match_id(tmp51x_id, client)->driver_data; ret = tmp51x_configure(dev, data); - if (ret < 0) { - dev_err(dev, "error configuring the device: %d\n", ret); - return ret; - } + if (ret < 0) + return dev_err_probe(dev, ret, "error configuring the device\n"); data->regmap = devm_regmap_init_i2c(client, &tmp51x_regmap_config); - if (IS_ERR(data->regmap)) { - dev_err(dev, "failed to allocate register map\n"); - return PTR_ERR(data->regmap); - } + if (IS_ERR(data->regmap)) + return dev_err_probe(dev, PTR_ERR(data->regmap), + "failed to allocate register map\n"); ret = tmp51x_init(data); - if (ret < 0) { - dev_err(dev, "error configuring the device: %d\n", ret); - return -ENODEV; - } + if (ret < 0) + return dev_err_probe(dev, ret, "error configuring the device\n"); hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, data, diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c index 68c77c493270..152e3c1c5c5f 100644 --- a/drivers/hwmon/tps23861.c +++ b/drivers/hwmon/tps23861.c @@ -132,7 +132,7 @@ static int tps23861_read_temp(struct tps23861_data *data, long *val) if (err < 0) return err; - *val = (regval * TEMPERATURE_LSB) - 20000; + *val = ((long)regval * TEMPERATURE_LSB) - 20000; return 0; } diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index 78d9f52e2a71..6768dbf39039 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c @@ -111,7 +111,7 @@ struct xgene_hwmon_dev { phys_addr_t comm_base_addr; void *pcc_comm_addr; - u64 usecs_lat; + unsigned int usecs_lat; }; /* @@ -712,7 +712,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev) goto out; } - if (!ctx->pcc_comm_addr) { + if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) { dev_err(&pdev->dev, "Failed to ioremap PCC comm region\n"); rc = -ENOMEM; diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c index bc90a03f478f..6ef7afa25631 100644 --- a/drivers/hwtracing/coresight/coresight-catu.c +++ b/drivers/hwtracing/coresight/coresight-catu.c @@ -267,7 +267,7 @@ catu_init_sg_table(struct device *catu_dev, int node, * Each table can address upto 1MB and we can have * CATU_PAGES_PER_SYSPAGE tables in a system page. */ - nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE; + nr_tpages = DIV_ROUND_UP(size, CATU_PAGES_PER_SYSPAGE * SZ_1M); catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages, size >> PAGE_SHIFT, pages); if (IS_ERR(catu_table)) diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index bcb08fadccf2..4477b1ab7357 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1478,18 +1478,20 @@ static void coresight_remove_conns(struct coresight_device *csdev) } /** - * coresight_timeout - loop until a bit has changed to a specific register - * state. + * coresight_timeout_action - loop until a bit has changed to a specific register + * state, with a callback after every trial. 
* @csa: coresight device access for the device * @offset: Offset of the register from the base of the device. * @position: the position of the bit of interest. * @value: the value the bit should have. + * @cb: Call back after each trial. * * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if * TIMEOUT_US has elapsed, which ever happens first. */ -int coresight_timeout(struct csdev_access *csa, u32 offset, - int position, int value) +int coresight_timeout_action(struct csdev_access *csa, u32 offset, + int position, int value, + coresight_timeout_cb_t cb) { int i; u32 val; @@ -1505,7 +1507,8 @@ int coresight_timeout(struct csdev_access *csa, u32 offset, if (!(val & BIT(position))) return 0; } - + if (cb) + cb(csa, offset, position, value); /* * Delay is arbitrary - the specification doesn't say how long * we are expected to wait. Extra check required to make sure @@ -1517,6 +1520,13 @@ int coresight_timeout(struct csdev_access *csa, u32 offset, return -EAGAIN; } +EXPORT_SYMBOL_GPL(coresight_timeout_action); + +int coresight_timeout(struct csdev_access *csa, u32 offset, + int position, int value) +{ + return coresight_timeout_action(csa, offset, position, value, NULL); +} EXPORT_SYMBOL_GPL(coresight_timeout); u32 coresight_relaxed_read32(struct coresight_device *csdev, u32 offset) diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 354267edcb45..1ad689db74da 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -369,6 +369,29 @@ static void etm4_check_arch_features(struct etmv4_drvdata *drvdata, } #endif /* CONFIG_ETM4X_IMPDEF_FEATURE */ +static void etm4x_sys_ins_barrier(struct csdev_access *csa, u32 offset, int pos, int val) +{ + if (!csa->io_mem) + isb(); +} + +/* + * etm4x_wait_status: Poll for TRCSTATR. == . While using system + * instruction to access the trace unit, each access must be separated by a + * synchronization barrier. 
See ARM IHI0064H.b section "4.3.7 Synchronization of + * register updates", for system instructions section, in "Notes": + * + * "In particular, whenever disabling or enabling the trace unit, a poll of + * TRCSTATR needs explicit synchronization between each read of TRCSTATR" + */ +static int etm4x_wait_status(struct csdev_access *csa, int pos, int val) +{ + if (!csa->io_mem) + return coresight_timeout_action(csa, TRCSTATR, pos, val, + etm4x_sys_ins_barrier); + return coresight_timeout(csa, TRCSTATR, pos, val); +} + static int etm4_enable_hw(struct etmv4_drvdata *drvdata) { int i, rc; @@ -400,7 +423,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) isb(); /* wait for TRCSTATR.IDLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) + if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); if (drvdata->nr_pe) @@ -493,7 +516,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) isb(); /* wait for TRCSTATR.IDLE to go back down to '0' */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 0)) + if (etm4x_wait_status(csa, TRCSTATR_IDLE_BIT, 0)) dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); @@ -845,10 +868,25 @@ static void etm4_disable_hw(void *info) tsb_csync(); etm4x_relaxed_write32(csa, control, TRCPRGCTLR); + /* + * As recommended by section 4.3.7 ("Synchronization when using system + * instructions to progrom the trace unit") of ARM IHI 0064H.b, the + * self-hosted trace analyzer must perform a Context synchronization + * event between writing to the TRCPRGCTLR and reading the TRCSTATR. + */ + if (!csa->io_mem) + isb(); + /* wait for TRCSTATR.PMSTABLE to go to '1' */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for PM stable Trace Status\n"); + /* + * As recommended by section 4.3.7 (Synchronization of register updates) + * of ARM IHI 0064H.b. 
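
To make the synchronization requirement concrete: when the trace unit is accessed through system instructions (csa->io_mem is false), the status poll effectively becomes the loop sketched below. This is an illustration only, not the driver's code path; the driver reaches the same effect through coresight_timeout_action() with etm4x_sys_ins_barrier() as the per-retry hook, and TIMEOUT_US is the same bound the kernel-doc for coresight_timeout_action() refers to.

  /* Illustrative open-coded equivalent of etm4x_wait_status(). */
  static int etm4x_poll_trcstatr_sketch(struct csdev_access *csa, int pos, int val)
  {
          int i;

          for (i = TIMEOUT_US; i > 0; i--) {
                  u32 statr = etm4x_read32(csa, TRCSTATR);

                  if (((statr >> pos) & 1) == val)
                          return 0;
                  /* ISB between successive TRCSTATR reads done via system instructions */
                  if (!csa->io_mem)
                          isb();
                  udelay(1);
          }

          return -EAGAIN;
  }
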
+ */ + isb(); + /* read the status of the single shot comparators */ for (i = 0; i < drvdata->nr_ss_cmp; i++) { config->ss_status[i] = @@ -1593,7 +1631,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) etm4_os_lock(drvdata); /* wait for TRCSTATR.PMSTABLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) { + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) { dev_err(etm_dev, "timeout while waiting for PM Stable Status\n"); etm4_os_unlock(drvdata); @@ -1684,7 +1722,7 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) state->trcpdcr = etm4x_read32(csa, TRCPDCR); /* wait for TRCSTATR.IDLE to go up */ - if (coresight_timeout(csa, TRCSTATR, TRCSTATR_IDLE_BIT, 1)) { + if (etm4x_wait_status(csa, TRCSTATR_PMSTABLE_BIT, 1)) { dev_err(etm_dev, "timeout while waiting for Idle Trace Status\n"); etm4_os_unlock(drvdata); diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 8dad239aba2c..e7985db1f29b 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -329,6 +329,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa824), .driver_data = (kernel_ulong_t)&intel_th_2x, }, + { + /* Arrow Lake */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7724), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Panther Lake-H */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe324), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { + /* Panther Lake-P/U */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe424), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, { /* Alder Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f), diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c index ee83c4581bce..cd2c8afebe79 100644 --- a/drivers/i2c/busses/i2c-ali1535.c +++ b/drivers/i2c/busses/i2c-ali1535.c @@ -490,6 +490,8 @@ MODULE_DEVICE_TABLE(pci, ali1535_ids); static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id) { + int ret; + if (ali1535_setup(dev)) { dev_warn(&dev->dev, "ALI1535 not detected, module not inserted.\n"); @@ -501,7 +503,15 @@ static int ali1535_probe(struct pci_dev *dev, const struct pci_device_id *id) snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name), "SMBus ALI1535 adapter at %04x", ali1535_offset); - return i2c_add_adapter(&ali1535_adapter); + ret = i2c_add_adapter(&ali1535_adapter); + if (ret) + goto release_region; + + return 0; + +release_region: + release_region(ali1535_smba, ALI1535_SMB_IOSIZE); + return ret; } static void ali1535_remove(struct pci_dev *dev) diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c index cc58feacd082..28a57cb6efb9 100644 --- a/drivers/i2c/busses/i2c-ali15x3.c +++ b/drivers/i2c/busses/i2c-ali15x3.c @@ -473,6 +473,8 @@ MODULE_DEVICE_TABLE (pci, ali15x3_ids); static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id) { + int ret; + if (ali15x3_setup(dev)) { dev_err(&dev->dev, "ALI15X3 not detected, module not inserted.\n"); @@ -484,7 +486,15 @@ static int ali15x3_probe(struct pci_dev *dev, const struct pci_device_id *id) snprintf(ali15x3_adapter.name, sizeof(ali15x3_adapter.name), "SMBus ALI15X3 adapter at %04x", ali15x3_smba); - return i2c_add_adapter(&ali15x3_adapter); + ret = i2c_add_adapter(&ali15x3_adapter); + if (ret) + goto release_region; + + return 0; + +release_region: + release_region(ali15x3_smba, ALI15X3_SMB_IOSIZE); + return ret; } static void ali15x3_remove(struct pci_dev *dev) diff --git 
a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c index 4e787dc709f9..d13b5f804540 100644 --- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c +++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c @@ -247,6 +247,9 @@ static int ec_i2c_probe(struct platform_device *pdev) u32 remote_bus; int err; + if (!ec) + return dev_err_probe(dev, -EPROBE_DEFER, "couldn't find parent EC device\n"); + if (!ec->cmd_xfer) { dev_err(dev, "Missing sendrecv\n"); return -EINVAL; diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c index ff12018bc206..45d97d611ba1 100644 --- a/drivers/i2c/busses/i2c-imx-lpi2c.c +++ b/drivers/i2c/busses/i2c-imx-lpi2c.c @@ -616,9 +616,9 @@ static int lpi2c_imx_probe(struct platform_device *pdev) return 0; rpm_disable: - pm_runtime_put(&pdev->dev); - pm_runtime_disable(&pdev->dev); pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); return ret; } diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 0c203c614197..76d5b80abfc7 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -287,6 +287,7 @@ static const struct of_device_id i2c_imx_dt_ids[] = { { .compatible = "fsl,imx6sll-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx6sx-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx6ul-i2c", .data = &imx6_i2c_hwdata, }, + { .compatible = "fsl,imx7d-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx7s-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx8mm-i2c", .data = &imx6_i2c_hwdata, }, { .compatible = "fsl,imx8mn-i2c", .data = &imx6_i2c_hwdata, }, diff --git a/drivers/i2c/busses/i2c-microchip-corei2c.c b/drivers/i2c/busses/i2c-microchip-corei2c.c index 4d7e9b25f018..71edb0d38e6a 100644 --- a/drivers/i2c/busses/i2c-microchip-corei2c.c +++ b/drivers/i2c/busses/i2c-microchip-corei2c.c @@ -93,27 +93,35 @@ * @base: pointer to register struct * @dev: device reference * @i2c_clk: clock reference for i2c input clock + * @msg_queue: pointer to the messages requiring sending * @buf: pointer to msg buffer for easier use * @msg_complete: xfer completion object * @adapter: core i2c abstraction * @msg_err: error code for completed message * @bus_clk_rate: current i2c bus clock rate * @isr_status: cached copy of local ISR status + * @total_num: total number of messages to be sent/received + * @current_num: index of the current message being sent/received * @msg_len: number of bytes transferred in msg * @addr: address of the current slave + * @restart_needed: whether or not a repeated start is required after current message */ struct mchp_corei2c_dev { void __iomem *base; struct device *dev; struct clk *i2c_clk; + struct i2c_msg *msg_queue; u8 *buf; struct completion msg_complete; struct i2c_adapter adapter; int msg_err; + int total_num; + int current_num; u32 bus_clk_rate; u32 isr_status; u16 msg_len; u8 addr; + bool restart_needed; }; static void mchp_corei2c_core_disable(struct mchp_corei2c_dev *idev) @@ -222,6 +230,47 @@ static int mchp_corei2c_fill_tx(struct mchp_corei2c_dev *idev) return 0; } +static void mchp_corei2c_next_msg(struct mchp_corei2c_dev *idev) +{ + struct i2c_msg *this_msg; + u8 ctrl; + + if (idev->current_num >= idev->total_num) { + complete(&idev->msg_complete); + return; + } + + /* + * If there's been an error, the isr needs to return control + * to the "main" part of the driver, so as not to keep sending + * messages once it completes and clears the SI bit. 
+ */ + if (idev->msg_err) { + complete(&idev->msg_complete); + return; + } + + this_msg = idev->msg_queue++; + + if (idev->current_num < (idev->total_num - 1)) { + struct i2c_msg *next_msg = idev->msg_queue; + + idev->restart_needed = next_msg->flags & I2C_M_RD; + } else { + idev->restart_needed = false; + } + + idev->addr = i2c_8bit_addr_from_msg(this_msg); + idev->msg_len = this_msg->len; + idev->buf = this_msg->buf; + + ctrl = readb(idev->base + CORE_I2C_CTRL); + ctrl |= CTRL_STA; + writeb(ctrl, idev->base + CORE_I2C_CTRL); + + idev->current_num++; +} + static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev) { u32 status = idev->isr_status; @@ -238,8 +287,6 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev) ctrl &= ~CTRL_STA; writeb(idev->addr, idev->base + CORE_I2C_DATA); writeb(ctrl, idev->base + CORE_I2C_CTRL); - if (idev->msg_len == 0) - finished = true; break; case STATUS_M_ARB_LOST: idev->msg_err = -EAGAIN; @@ -247,10 +294,14 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev) break; case STATUS_M_SLAW_ACK: case STATUS_M_TX_DATA_ACK: - if (idev->msg_len > 0) + if (idev->msg_len > 0) { mchp_corei2c_fill_tx(idev); - else - last_byte = true; + } else { + if (idev->restart_needed) + finished = true; + else + last_byte = true; + } break; case STATUS_M_TX_DATA_NACK: case STATUS_M_SLAR_NACK: @@ -287,7 +338,7 @@ static irqreturn_t mchp_corei2c_handle_isr(struct mchp_corei2c_dev *idev) mchp_corei2c_stop(idev); if (last_byte || finished) - complete(&idev->msg_complete); + mchp_corei2c_next_msg(idev); return IRQ_HANDLED; } @@ -311,21 +362,48 @@ static irqreturn_t mchp_corei2c_isr(int irq, void *_dev) return ret; } -static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev, - struct i2c_msg *msg) +static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num) { - u8 ctrl; + struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap); + struct i2c_msg *this_msg = msgs; unsigned long time_left; - - idev->addr = i2c_8bit_addr_from_msg(msg); - idev->msg_len = msg->len; - idev->buf = msg->buf; - idev->msg_err = 0; - - reinit_completion(&idev->msg_complete); + u8 ctrl; mchp_corei2c_core_enable(idev); + /* + * The isr controls the flow of a transfer, this info needs to be saved + * to a location that it can access the queue information from. + */ + idev->restart_needed = false; + idev->msg_queue = msgs; + idev->total_num = num; + idev->current_num = 0; + + /* + * But the first entry to the isr is triggered by the start in this + * function, so the first message needs to be "dequeued". 
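
For orientation, the queueing above is driven by ordinary i2c_transfer() calls from client drivers, and the repeated-start case (restart_needed) is the classic write-register-then-read sequence. A minimal, purely illustrative helper follows; the register layout, lengths and function name are invented:

  /* Write one register-address byte, then read two bytes after a repeated START. */
  static int example_read_word(struct i2c_client *client, u8 reg, __be16 *out)
  {
          struct i2c_msg msgs[2] = {
                  {
                          .addr  = client->addr,
                          .flags = 0,              /* write */
                          .len   = 1,
                          .buf   = &reg,
                  },
                  {
                          .addr  = client->addr,
                          .flags = I2C_M_RD,       /* read: repeated START before this message */
                          .len   = sizeof(*out),
                          .buf   = (u8 *)out,
                  },
          };
          int ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));

          return ret == ARRAY_SIZE(msgs) ? 0 : (ret < 0 ? ret : -EIO);
  }
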
+ */ + idev->addr = i2c_8bit_addr_from_msg(this_msg); + idev->msg_len = this_msg->len; + idev->buf = this_msg->buf; + idev->msg_err = 0; + + if (idev->total_num > 1) { + struct i2c_msg *next_msg = msgs + 1; + + idev->restart_needed = next_msg->flags & I2C_M_RD; + } + + idev->current_num++; + idev->msg_queue++; + + reinit_completion(&idev->msg_complete); + + /* + * Send the first start to pass control to the isr + */ ctrl = readb(idev->base + CORE_I2C_CTRL); ctrl |= CTRL_STA; writeb(ctrl, idev->base + CORE_I2C_CTRL); @@ -335,20 +413,8 @@ static int mchp_corei2c_xfer_msg(struct mchp_corei2c_dev *idev, if (!time_left) return -ETIMEDOUT; - return idev->msg_err; -} - -static int mchp_corei2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, - int num) -{ - struct mchp_corei2c_dev *idev = i2c_get_adapdata(adap); - int i, ret; - - for (i = 0; i < num; i++) { - ret = mchp_corei2c_xfer_msg(idev, msgs++); - if (ret) - return ret; - } + if (idev->msg_err) + return idev->msg_err; return num; } diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 767dd15b3c88..0947e3d155c5 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -2335,6 +2335,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev) if (irq < 0) return irq; + /* + * Disable the interrupt to avoid the interrupt handler being triggered + * incorrectly by the asynchronous interrupt status since the machine + * might do a warm reset during the last smbus/i2c transfer session. + */ + npcm_i2c_int_enable(bus, false); + ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0, dev_name(bus->dev), bus); if (ret) diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 7ec252199706..256fa766b1d3 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -1049,23 +1049,6 @@ static int omap_i2c_transmit_data(struct omap_i2c_dev *omap, u8 num_bytes, return 0; } -static irqreturn_t -omap_i2c_isr(int irq, void *dev_id) -{ - struct omap_i2c_dev *omap = dev_id; - irqreturn_t ret = IRQ_HANDLED; - u16 mask; - u16 stat; - - stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); - mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG) & ~OMAP_I2C_STAT_NACK; - - if (stat & mask) - ret = IRQ_WAKE_THREAD; - - return ret; -} - static int omap_i2c_xfer_data(struct omap_i2c_dev *omap) { u16 bits; @@ -1096,8 +1079,13 @@ static int omap_i2c_xfer_data(struct omap_i2c_dev *omap) } if (stat & OMAP_I2C_STAT_NACK) { - err |= OMAP_I2C_STAT_NACK; + omap->cmd_err |= OMAP_I2C_STAT_NACK; omap_i2c_ack_stat(omap, OMAP_I2C_STAT_NACK); + + if (!(stat & ~OMAP_I2C_STAT_NACK)) { + err = -EAGAIN; + break; + } } if (stat & OMAP_I2C_STAT_AL) { @@ -1475,7 +1463,7 @@ omap_i2c_probe(struct platform_device *pdev) IRQF_NO_SUSPEND, pdev->name, omap); else r = devm_request_threaded_irq(&pdev->dev, omap->irq, - omap_i2c_isr, omap_i2c_isr_thread, + NULL, omap_i2c_isr_thread, IRQF_NO_SUSPEND | IRQF_ONESHOT, pdev->name, omap); diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index d2c09b0fdf52..ab87bef50402 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -95,7 +95,7 @@ enum { static inline int wait_timeout(struct i2c_pnx_algo_data *data) { - long timeout = data->timeout; + long timeout = jiffies_to_msecs(data->timeout); while (timeout > 0 && (ioread32(I2C_REG_STS(data)) & mstatus_active)) { mdelay(1); @@ -106,7 +106,7 @@ static inline int wait_timeout(struct i2c_pnx_algo_data *data) static inline int wait_reset(struct 
i2c_pnx_algo_data *data) { - long timeout = data->timeout; + long timeout = jiffies_to_msecs(data->timeout); while (timeout > 0 && (ioread32(I2C_REG_CTL(data)) & mcntrl_reset)) { mdelay(1); diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c index ade3f0ea5955..8263e017577d 100644 --- a/drivers/i2c/busses/i2c-pxa.c +++ b/drivers/i2c/busses/i2c-pxa.c @@ -1508,7 +1508,10 @@ static int i2c_pxa_probe(struct platform_device *dev) i2c->adap.name); } - clk_prepare_enable(i2c->clk); + ret = clk_prepare_enable(i2c->clk); + if (ret) + return dev_err_probe(&dev->dev, ret, + "failed to enable clock\n"); if (i2c->use_pio) { i2c->adap.algo = &i2c_pxa_pio_algorithm; diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 78682388e02e..82de4651d18f 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -150,6 +151,8 @@ /* TAG length for DATA READ in RX FIFO */ #define READ_RX_TAGS_LEN 2 +#define QUP_BUS_WIDTH 8 + static unsigned int scl_freq; module_param_named(scl_freq, scl_freq, uint, 0444); MODULE_PARM_DESC(scl_freq, "SCL frequency override"); @@ -227,6 +230,7 @@ struct qup_i2c_dev { int irq; struct clk *clk; struct clk *pclk; + struct icc_path *icc_path; struct i2c_adapter adap; int clk_ctl; @@ -255,6 +259,10 @@ struct qup_i2c_dev { /* To configure when bus is in run state */ u32 config_run; + /* bandwidth votes */ + u32 src_clk_freq; + u32 cur_bw_clk_freq; + /* dma parameters */ bool is_dma; /* To check if the current transfer is using DMA */ @@ -453,6 +461,23 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len) return ret; } +static int qup_i2c_vote_bw(struct qup_i2c_dev *qup, u32 clk_freq) +{ + u32 needed_peak_bw; + int ret; + + if (qup->cur_bw_clk_freq == clk_freq) + return 0; + + needed_peak_bw = Bps_to_icc(clk_freq * QUP_BUS_WIDTH); + ret = icc_set_bw(qup->icc_path, 0, needed_peak_bw); + if (ret) + return ret; + + qup->cur_bw_clk_freq = clk_freq; + return 0; +} + static void qup_i2c_write_tx_fifo_v1(struct qup_i2c_dev *qup) { struct qup_i2c_block *blk = &qup->blk; @@ -840,6 +865,10 @@ static int qup_i2c_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int ret = 0; int idx = 0; + ret = qup_i2c_vote_bw(qup, qup->src_clk_freq); + if (ret) + return ret; + enable_irq(qup->irq); ret = qup_i2c_req_dma(qup); @@ -1645,6 +1674,7 @@ static void qup_i2c_disable_clocks(struct qup_i2c_dev *qup) config = readl(qup->base + QUP_CONFIG); config |= QUP_CLOCK_AUTO_GATE; writel(config, qup->base + QUP_CONFIG); + qup_i2c_vote_bw(qup, 0); clk_disable_unprepare(qup->pclk); } @@ -1745,6 +1775,11 @@ static int qup_i2c_probe(struct platform_device *pdev) goto fail_dma; } qup->is_dma = true; + + qup->icc_path = devm_of_icc_get(&pdev->dev, NULL); + if (IS_ERR(qup->icc_path)) + return dev_err_probe(&pdev->dev, PTR_ERR(qup->icc_path), + "failed to get interconnect path\n"); } nodma: @@ -1793,6 +1828,7 @@ nodma: qup_i2c_enable_clocks(qup); src_clk_freq = clk_get_rate(qup->clk); } + qup->src_clk_freq = src_clk_freq; /* * Bootloaders might leave a pending interrupt on certain QUP's, diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index d0098e342ba2..c7f2a9d8bcd5 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -110,6 +110,8 @@ #define ID_P_PM_BLOCKED BIT(31) #define ID_P_MASK GENMASK(31, 28) +#define ID_SLAVE_NACK BIT(0) + enum rcar_i2c_type { I2C_RCAR_GEN1, I2C_RCAR_GEN2, @@ -143,6 +145,7 @@ struct 
rcar_i2c_priv { int irq; struct i2c_client *host_notify_client; + u8 slave_flags; }; #define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent) @@ -597,6 +600,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) { u32 ssr_raw, ssr_filtered; u8 value; + int ret; ssr_raw = rcar_i2c_read(priv, ICSSR) & 0xff; ssr_filtered = ssr_raw & rcar_i2c_read(priv, ICSIER); @@ -612,7 +616,10 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) rcar_i2c_write(priv, ICRXTX, value); rcar_i2c_write(priv, ICSIER, SDE | SSR | SAR); } else { - i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value); + ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_REQUESTED, &value); + if (ret) + priv->slave_flags |= ID_SLAVE_NACK; + rcar_i2c_read(priv, ICRXTX); /* dummy read */ rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR); } @@ -625,18 +632,21 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) if (ssr_filtered & SSR) { i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */ + priv->slave_flags &= ~ID_SLAVE_NACK; rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); } /* master wants to write to us */ if (ssr_filtered & SDR) { - int ret; - value = rcar_i2c_read(priv, ICRXTX); ret = i2c_slave_event(priv->slave, I2C_SLAVE_WRITE_RECEIVED, &value); - /* Send NACK in case of error */ - rcar_i2c_write(priv, ICSCR, SIE | SDBS | (ret < 0 ? FNA : 0)); + if (ret) + priv->slave_flags |= ID_SLAVE_NACK; + + /* Send NACK in case of error, but it will come 1 byte late :( */ + rcar_i2c_write(priv, ICSCR, SIE | SDBS | + (priv->slave_flags & ID_SLAVE_NACK ? FNA : 0)); rcar_i2c_write(priv, ICSSR, ~SDR & 0xff); } diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index b9959621cc5d..6f8ce656183c 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -325,7 +325,7 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t) if (brl <= (0x1F + 3)) break; - total_ticks /= 2; + total_ticks = DIV_ROUND_UP(total_ticks, 2); rate /= 2; } diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c index 87d56250d78a..c42ecadac4f2 100644 --- a/drivers/i2c/busses/i2c-sis630.c +++ b/drivers/i2c/busses/i2c-sis630.c @@ -509,6 +509,8 @@ MODULE_DEVICE_TABLE(pci, sis630_ids); static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id) { + int ret; + if (sis630_setup(dev)) { dev_err(&dev->dev, "SIS630 compatible bus not detected, " @@ -522,7 +524,15 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id) snprintf(sis630_adapter.name, sizeof(sis630_adapter.name), "SMBus SIS630 adapter at %04x", smbus_base + SMB_STS); - return i2c_add_adapter(&sis630_adapter); + ret = i2c_add_adapter(&sis630_adapter); + if (ret) + goto release_region; + + return 0; + +release_region: + release_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION); + return ret; } static void sis630_remove(struct pci_dev *dev) diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c index 14ae0cfc325e..d2499f302b50 100644 --- a/drivers/i2c/i2c-core-acpi.c +++ b/drivers/i2c/i2c-core-acpi.c @@ -355,6 +355,25 @@ static const struct acpi_device_id i2c_acpi_force_400khz_device_ids[] = { {} }; +static const struct acpi_device_id i2c_acpi_force_100khz_device_ids[] = { + /* + * When a 400KHz freq is used on this model of ELAN touchpad in Linux, + * excessive smoothing (similar to when the touchpad's firmware detects + * a noisy signal) is sometimes applied. 
As some devices' (e.g, Lenovo + * V15 G4) ACPI tables specify a 400KHz frequency for this device and + * some I2C busses (e.g, Designware I2C) default to a 400KHz freq, + * force the speed to 100KHz as a workaround. + * + * For future investigation: This problem may be related to the default + * HCNT/LCNT values given by some busses' drivers, because they are not + * specified in the aforementioned devices' ACPI tables, and because + * the device works without issues on Windows at what is expected to be + * a 400KHz frequency. The root cause of the issue is not known. + */ + { "ELAN06FA", 0 }, + {} +}; + static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, void *data, void **return_value) { @@ -373,6 +392,9 @@ static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, if (acpi_match_device_ids(adev, i2c_acpi_force_400khz_device_ids) == 0) lookup->force_speed = I2C_MAX_FAST_MODE_FREQ; + if (acpi_match_device_ids(adev, i2c_acpi_force_100khz_device_ids) == 0) + lookup->force_speed = I2C_MAX_STANDARD_MODE_FREQ; + return AE_OK; } diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 45a3f7e7b3f6..cea057704c00 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -261,7 +261,9 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) pm_runtime_no_callbacks(&pdev->dev); /* switch to first parent as active master */ - i2c_demux_activate_master(priv, 0); + err = i2c_demux_activate_master(priv, 0); + if (err) + goto err_rollback; err = device_create_file(&pdev->dev, &dev_attr_available_masters); if (err) diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index ab0b5691b03e..18103c1e8d76 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -279,7 +279,8 @@ static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env) struct i3c_device_info devinfo; u16 manuf, part, ext; - i3c_device_get_info(i3cdev, &devinfo); + if (i3cdev->desc) + devinfo = i3cdev->desc->info; manuf = I3C_PID_MANUF_ID(devinfo.pid); part = I3C_PID_PART_ID(devinfo.pid); ext = I3C_PID_EXTRA_INFO(devinfo.pid); @@ -341,10 +342,10 @@ struct bus_type i3c_bus_type = { }; static enum i3c_addr_slot_status -i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr) +i3c_bus_get_addr_slot_status_mask(struct i3c_bus *bus, u16 addr, u32 mask) { unsigned long status; - int bitpos = addr * 2; + int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS; if (addr > I2C_MAX_ADDR) return I3C_ADDR_SLOT_RSVD; @@ -352,22 +353,33 @@ i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr) status = bus->addrslots[bitpos / BITS_PER_LONG]; status >>= bitpos % BITS_PER_LONG; - return status & I3C_ADDR_SLOT_STATUS_MASK; + return status & mask; } -static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr, - enum i3c_addr_slot_status status) +static enum i3c_addr_slot_status +i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr) { - int bitpos = addr * 2; + return i3c_bus_get_addr_slot_status_mask(bus, addr, I3C_ADDR_SLOT_STATUS_MASK); +} + +static void i3c_bus_set_addr_slot_status_mask(struct i3c_bus *bus, u16 addr, + enum i3c_addr_slot_status status, u32 mask) +{ + int bitpos = addr * I3C_ADDR_SLOT_STATUS_BITS; unsigned long *ptr; if (addr > I2C_MAX_ADDR) return; ptr = bus->addrslots + (bitpos / BITS_PER_LONG); - *ptr &= ~((unsigned long)I3C_ADDR_SLOT_STATUS_MASK << - (bitpos % BITS_PER_LONG)); - *ptr |= (unsigned long)status << (bitpos % BITS_PER_LONG); + *ptr &= ~((unsigned long)mask << 
(bitpos % BITS_PER_LONG)); + *ptr |= ((unsigned long)status & mask) << (bitpos % BITS_PER_LONG); +} + +static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr, + enum i3c_addr_slot_status status) +{ + i3c_bus_set_addr_slot_status_mask(bus, addr, status, I3C_ADDR_SLOT_STATUS_MASK); } static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr) @@ -379,13 +391,44 @@ static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr) return status == I3C_ADDR_SLOT_FREE; } +/* + * ┌────┬─────────────┬───┬─────────┬───┐ + * │S/Sr│ 7'h7E RnW=0 │ACK│ ENTDAA │ T ├────┐ + * └────┴─────────────┴───┴─────────┴───┘ │ + * ┌─────────────────────────────────────────┘ + * │ ┌──┬─────────────┬───┬─────────────────┬────────────────┬───┬─────────┐ + * └─►│Sr│7'h7E RnW=1 │ACK│48bit UID BCR DCR│Assign 7bit Addr│PAR│ ACK/NACK│ + * └──┴─────────────┴───┴─────────────────┴────────────────┴───┴─────────┘ + * Some master controllers (such as HCI) need to prepare the entire above transaction before + * sending it out to the I3C bus. This means that a 7-bit dynamic address needs to be allocated + * before knowing the target device's UID information. + * + * However, some I3C targets may request specific addresses (called as "init_dyn_addr"), which is + * typically specified by the DT-'s assigned-address property. Lower addresses having higher IBI + * priority. If it is available, i3c_bus_get_free_addr() preferably return a free address that is + * not in the list of desired addresses (called as "init_dyn_addr"). This allows the device with + * the "init_dyn_addr" to switch to its "init_dyn_addr" when it hot-joins the I3C bus. Otherwise, + * if the "init_dyn_addr" is already in use by another I3C device, the target device will not be + * able to switch to its desired address. + * + * If the previous step fails, fallback returning one of the remaining unassigned address, + * regardless of its state in the desired list. 
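
The policy spelled out above, and implemented by i3c_bus_get_free_addr() just below, boils down to a two-pass scan: prefer a slot that no device has pre-claimed as its init_dyn_addr, then fall back to any free slot. A distilled, self-contained sketch of that policy, with plain boolean arrays standing in for the real addrslots encoding (illustrative only):

  /* Illustrative only: pick a free slot, avoiding "desired" slots when possible. */
  static int pick_free_slot_sketch(const bool *is_free, const bool *is_desired, int nr_slots)
  {
          int i;

          for (i = 0; i < nr_slots; i++)
                  if (is_free[i] && !is_desired[i])
                          return i;

          for (i = 0; i < nr_slots; i++)
                  if (is_free[i])
                          return i;

          return -ENOMEM;
  }
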
+ */ static int i3c_bus_get_free_addr(struct i3c_bus *bus, u8 start_addr) { enum i3c_addr_slot_status status; u8 addr; for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) { - status = i3c_bus_get_addr_slot_status(bus, addr); + status = i3c_bus_get_addr_slot_status_mask(bus, addr, + I3C_ADDR_SLOT_EXT_STATUS_MASK); + if (status == I3C_ADDR_SLOT_FREE) + return addr; + } + + for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) { + status = i3c_bus_get_addr_slot_status_mask(bus, addr, + I3C_ADDR_SLOT_STATUS_MASK); if (status == I3C_ADDR_SLOT_FREE) return addr; } @@ -514,6 +557,88 @@ static ssize_t i2c_scl_frequency_show(struct device *dev, } static DEVICE_ATTR_RO(i2c_scl_frequency); +static int i3c_set_hotjoin(struct i3c_master_controller *master, bool enable) +{ + int ret; + + if (!master || !master->ops) + return -EINVAL; + + if (!master->ops->enable_hotjoin || !master->ops->disable_hotjoin) + return -EINVAL; + + i3c_bus_normaluse_lock(&master->bus); + + if (enable) + ret = master->ops->enable_hotjoin(master); + else + ret = master->ops->disable_hotjoin(master); + + master->hotjoin = enable; + + i3c_bus_normaluse_unlock(&master->bus); + + return ret; +} + +static ssize_t hotjoin_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct i3c_bus *i3cbus = dev_to_i3cbus(dev); + int ret; + bool res; + + if (!i3cbus->cur_master) + return -EINVAL; + + if (kstrtobool(buf, &res)) + return -EINVAL; + + ret = i3c_set_hotjoin(i3cbus->cur_master->common.master, res); + if (ret) + return ret; + + return count; +} + +/* + * i3c_master_enable_hotjoin - Enable hotjoin + * @master: I3C master object + * + * Return: a 0 in case of success, an negative error code otherwise. + */ +int i3c_master_enable_hotjoin(struct i3c_master_controller *master) +{ + return i3c_set_hotjoin(master, true); +} +EXPORT_SYMBOL_GPL(i3c_master_enable_hotjoin); + +/* + * i3c_master_disable_hotjoin - Disable hotjoin + * @master: I3C master object + * + * Return: a 0 in case of success, an negative error code otherwise. 
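
A controller driver advertises Hot-Join support by implementing both of the new hooks in its i3c_master_controller_ops; the SVC driver further down in this patch is the in-tree example, but the general shape is roughly as follows (driver name and interrupt details are hypothetical):

  static int foo_i3c_enable_hotjoin(struct i3c_master_controller *m)
  {
          /* e.g. unmask the controller's Hot-Join / slave-start interrupt */
          return 0;
  }

  static int foo_i3c_disable_hotjoin(struct i3c_master_controller *m)
  {
          /* e.g. mask it again */
          return 0;
  }

  static const struct i3c_master_controller_ops foo_i3c_ops = {
          /* ... bus_init, do_daa, priv_xfers and the other existing hooks ... */
          .enable_hotjoin  = foo_i3c_enable_hotjoin,
          .disable_hotjoin = foo_i3c_disable_hotjoin,
  };

With both hooks present, the new per-bus "hotjoin" sysfs attribute added above (hotjoin_store()/hotjoin_show()) toggles the feature at runtime.
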
+ */ +int i3c_master_disable_hotjoin(struct i3c_master_controller *master) +{ + return i3c_set_hotjoin(master, false); +} +EXPORT_SYMBOL_GPL(i3c_master_disable_hotjoin); + +static ssize_t hotjoin_show(struct device *dev, struct device_attribute *da, char *buf) +{ + struct i3c_bus *i3cbus = dev_to_i3cbus(dev); + ssize_t ret; + + i3c_bus_normaluse_lock(i3cbus); + ret = sysfs_emit(buf, "%d\n", i3cbus->cur_master->common.master->hotjoin); + i3c_bus_normaluse_unlock(i3cbus); + + return ret; +} + +static DEVICE_ATTR_RW(hotjoin); + static struct attribute *i3c_masterdev_attrs[] = { &dev_attr_mode.attr, &dev_attr_current_master.attr, @@ -524,6 +649,7 @@ static struct attribute *i3c_masterdev_attrs[] = { &dev_attr_pid.attr, &dev_attr_dynamic_address.attr, &dev_attr_hdrcap.attr, + &dev_attr_hotjoin.attr, NULL, }; ATTRIBUTE_GROUPS(i3c_masterdev); @@ -1281,7 +1407,7 @@ static void i3c_master_put_i3c_addrs(struct i3c_dev_desc *dev) I3C_ADDR_SLOT_FREE); if (dev->boardinfo && dev->boardinfo->init_dyn_addr) - i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr, + i3c_bus_set_addr_slot_status(&master->bus, dev->boardinfo->init_dyn_addr, I3C_ADDR_SLOT_FREE); } @@ -1366,16 +1492,9 @@ static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr) { struct i3c_master_controller *master = i3c_dev_get_master(dev); - enum i3c_addr_slot_status status; int ret; - if (dev->info.dyn_addr != old_dyn_addr && - (!dev->boardinfo || - dev->info.dyn_addr != dev->boardinfo->init_dyn_addr)) { - status = i3c_bus_get_addr_slot_status(&master->bus, - dev->info.dyn_addr); - if (status != I3C_ADDR_SLOT_FREE) - return -EBUSY; + if (dev->info.dyn_addr != old_dyn_addr) { i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr, I3C_ADDR_SLOT_I3C_DEV); @@ -1727,6 +1846,12 @@ static int i3c_master_bus_init(struct i3c_master_controller *master) goto err_bus_cleanup; } + if (master->ops->set_speed) { + ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_SLOW_SPEED); + if (ret) + goto err_bus_cleanup; + } + /* * Reset all dynamic address that may have been assigned before * (assigned by the bootloader for example). @@ -1735,6 +1860,12 @@ static int i3c_master_bus_init(struct i3c_master_controller *master) if (ret && ret != I3C_ERROR_M2) goto err_bus_cleanup; + if (master->ops->set_speed) { + ret = master->ops->set_speed(master, I3C_OPEN_DRAIN_NORMAL_SPEED); + if (ret) + goto err_bus_cleanup; + } + /* Disable all slave events before starting DAA. */ ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR, I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR | @@ -1765,9 +1896,11 @@ static int i3c_master_bus_init(struct i3c_master_controller *master) goto err_rstdaa; } - i3c_bus_set_addr_slot_status(&master->bus, - i3cboardinfo->init_dyn_addr, - I3C_ADDR_SLOT_I3C_DEV); + /* Do not mark as occupied until real device exist in bus */ + i3c_bus_set_addr_slot_status_mask(&master->bus, + i3cboardinfo->init_dyn_addr, + I3C_ADDR_SLOT_EXT_DESIRED, + I3C_ADDR_SLOT_EXT_STATUS_MASK); /* * Only try to create/attach devices that have a static @@ -1898,11 +2031,16 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, ibireq.max_payload_len = olddev->ibi->max_payload_len; ibireq.num_slots = olddev->ibi->num_slots; - if (olddev->ibi->enabled) { + if (olddev->ibi->enabled) enable_ibi = true; - i3c_dev_disable_ibi_locked(olddev); - } - + /* + * The olddev should not receive any commands on the + * i3c bus as it does not exist and has been assigned + * a new address. This will result in NACK or timeout. 
+ * So, update the olddev->ibi->enabled flag to false + * to avoid DISEC with OldAddr. + */ + olddev->ibi->enabled = false; i3c_dev_free_ibi_locked(olddev); } mutex_unlock(&olddev->ibi_lock); @@ -1930,7 +2068,8 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, else expected_dyn_addr = newdev->info.dyn_addr; - if (newdev->info.dyn_addr != expected_dyn_addr) { + if (newdev->info.dyn_addr != expected_dyn_addr && + i3c_bus_get_addr_slot_status(&master->bus, expected_dyn_addr) == I3C_ADDR_SLOT_FREE) { /* * Try to apply the expected dynamic address. If it fails, keep * the address assigned by the master. @@ -2356,6 +2495,9 @@ static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master) */ void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot) { + if (!dev->ibi || !slot) + return; + atomic_inc(&dev->ibi->pending_ibis); queue_work(dev->common.master->wq, &slot->work); } @@ -2696,17 +2838,13 @@ EXPORT_SYMBOL_GPL(i3c_master_register); * @master: master used to send frames on the bus * * Basically undo everything done in i3c_master_register(). - * - * Return: 0 in case of success, a negative error code otherwise. */ -int i3c_master_unregister(struct i3c_master_controller *master) +void i3c_master_unregister(struct i3c_master_controller *master) { i3c_master_i2c_adapter_cleanup(master); i3c_master_unregister_i3c_devs(master); i3c_master_bus_cleanup(master); device_unregister(&master->dev); - - return 0; } EXPORT_SYMBOL_GPL(i3c_master_unregister); diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index 51a8608203de..b72f6dce18b4 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -1184,11 +1184,8 @@ err_disable_core_clk: static int dw_i3c_remove(struct platform_device *pdev) { struct dw_i3c_master *master = platform_get_drvdata(pdev); - int ret; - ret = i3c_master_unregister(&master->base); - if (ret) - return ret; + i3c_master_unregister(&master->base); reset_control_assert(master->core_rst); diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index b9cfda6ae9ae..c5a37f58079a 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -1666,11 +1666,9 @@ err_disable_pclk: static int cdns_i3c_master_remove(struct platform_device *pdev) { struct cdns_i3c_master *master = platform_get_drvdata(pdev); - int ret; - ret = i3c_master_unregister(&master->base); - if (ret) - return ret; + cancel_work_sync(&master->hj_work); + i3c_master_unregister(&master->base); clk_disable_unprepare(master->sysclk); clk_disable_unprepare(master->pclk); diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c index 6aef5ce43cc1..f9bc58366a72 100644 --- a/drivers/i3c/master/mipi-i3c-hci/core.c +++ b/drivers/i3c/master/mipi-i3c-hci/core.c @@ -769,7 +769,9 @@ static int i3c_hci_remove(struct platform_device *pdev) { struct i3c_hci *hci = platform_get_drvdata(pdev); - return i3c_master_unregister(&hci->master); + i3c_master_unregister(&hci->master); + + return 0; } static const __maybe_unused struct of_device_id i3c_hci_of_match[] = { diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c index edc3a69bfe31..bcc0c7d4131f 100644 --- a/drivers/i3c/master/mipi-i3c-hci/dma.c +++ b/drivers/i3c/master/mipi-i3c-hci/dma.c @@ -174,10 +174,10 @@ static void hci_dma_cleanup(struct i3c_hci *hci) for (i = 0; i < rings->total; i++) { rh = &rings->headers[i]; + 
rh_reg_write(INTR_SIGNAL_ENABLE, 0); rh_reg_write(RING_CONTROL, 0); rh_reg_write(CR_SETUP, 0); rh_reg_write(IBI_SETUP, 0); - rh_reg_write(INTR_SIGNAL_ENABLE, 0); if (rh->xfer) dma_free_coherent(&hci->master.dev, diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c index e71c90e5ac60..9b287f92d078 100644 --- a/drivers/i3c/master/svc-i3c-master.c +++ b/drivers/i3c/master/svc-i3c-master.c @@ -128,6 +128,9 @@ /* This parameter depends on the implementation and may be tuned */ #define SVC_I3C_FIFO_SIZE 16 +#define SVC_I3C_EVENT_IBI GENMASK(7, 0) +#define SVC_I3C_EVENT_HOTJOIN BIT(31) + struct svc_i3c_cmd { u8 addr; bool rnw; @@ -171,6 +174,8 @@ struct svc_i3c_xfer { * @ibi.tbq_slot: To be queued IBI slot * @ibi.lock: IBI lock * @lock: Transfer lock, protect between IBI work thread and callbacks from master + * @enabled_events: Bit masks for enable events (IBI, HotJoin). + * @mctrl_config: Configuration value in SVC_I3C_MCTRL for setting speed back. */ struct svc_i3c_master { struct i3c_master_controller base; @@ -199,6 +204,8 @@ struct svc_i3c_master { spinlock_t lock; } ibi; struct mutex lock; + u32 enabled_events; + u32 mctrl_config; }; /** @@ -213,6 +220,11 @@ struct svc_i3c_i2c_dev_data { struct i3c_generic_ibi_pool *ibi_pool; }; +static inline bool is_events_enabled(struct svc_i3c_master *master, u32 mask) +{ + return !!(master->enabled_events & mask); +} + static bool svc_i3c_master_error(struct svc_i3c_master *master) { u32 mstatus, merrwarn; @@ -356,7 +368,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master, slot->len < SVC_I3C_FIFO_SIZE) { mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL); count = SVC_I3C_MDATACTRL_RXCOUNT(mdatactrl); - readsl(master->regs + SVC_I3C_MRDATAB, buf, count); + readsb(master->regs + SVC_I3C_MRDATAB, buf, count); slot->len += count; buf += count; } @@ -432,13 +444,16 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) switch (ibitype) { case SVC_I3C_MSTATUS_IBITYPE_IBI: dev = svc_i3c_master_dev_from_addr(master, ibiaddr); - if (!dev) + if (!dev || !is_events_enabled(master, SVC_I3C_EVENT_IBI)) svc_i3c_master_nack_ibi(master); else svc_i3c_master_handle_ibi(master, dev); break; case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN: - svc_i3c_master_ack_ibi(master, false); + if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN)) + svc_i3c_master_ack_ibi(master, false); + else + svc_i3c_master_nack_ibi(master); break; case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST: svc_i3c_master_nack_ibi(master); @@ -475,9 +490,13 @@ static void svc_i3c_master_ibi_work(struct work_struct *work) svc_i3c_master_emit_stop(master); break; case SVC_I3C_MSTATUS_IBITYPE_HOT_JOIN: - queue_work(master->base.wq, &master->hj_work); + svc_i3c_master_emit_stop(master); + if (is_events_enabled(master, SVC_I3C_EVENT_HOTJOIN)) + queue_work(master->base.wq, &master->hj_work); break; case SVC_I3C_MSTATUS_IBITYPE_MASTER_REQUEST: + svc_i3c_master_emit_stop(master); + break; default: break; } @@ -506,6 +525,54 @@ static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } +static int svc_i3c_master_set_speed(struct i3c_master_controller *m, + enum i3c_open_drain_speed speed) +{ + struct svc_i3c_master *master = to_svc_i3c_master(m); + struct i3c_bus *bus = i3c_master_get_bus(&master->base); + u32 ppbaud, odbaud, odhpp, mconfig; + unsigned long fclk_rate; + int ret; + + ret = pm_runtime_resume_and_get(master->dev); + if (ret < 0) { + dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__); + return ret; + } + + 
switch (speed) { + case I3C_OPEN_DRAIN_SLOW_SPEED: + fclk_rate = clk_get_rate(master->fclk); + if (!fclk_rate) { + ret = -EINVAL; + goto rpm_out; + } + /* + * Set 50% duty-cycle I2C speed to I3C OPEN-DRAIN mode, so the first + * broadcast address is visible to all I2C/I3C devices on the I3C bus. + * I3C device working as a I2C device will turn off its 50ns Spike + * Filter to change to I3C mode. + */ + mconfig = master->mctrl_config; + ppbaud = FIELD_GET(GENMASK(11, 8), mconfig); + odhpp = 0; + odbaud = DIV_ROUND_UP(fclk_rate, bus->scl_rate.i2c * (2 + 2 * ppbaud)) - 1; + mconfig &= ~GENMASK(24, 16); + mconfig |= SVC_I3C_MCONFIG_ODBAUD(odbaud) | SVC_I3C_MCONFIG_ODHPP(odhpp); + writel(mconfig, master->regs + SVC_I3C_MCONFIG); + break; + case I3C_OPEN_DRAIN_NORMAL_SPEED: + writel(master->mctrl_config, master->regs + SVC_I3C_MCONFIG); + break; + } + +rpm_out: + pm_runtime_mark_last_busy(master->dev); + pm_runtime_put_autosuspend(master->dev); + + return ret; +} + static int svc_i3c_master_bus_init(struct i3c_master_controller *m) { struct svc_i3c_master *master = to_svc_i3c_master(m); @@ -588,6 +655,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m) SVC_I3C_MCONFIG_I2CBAUD(i2cbaud); writel(reg, master->regs + SVC_I3C_MCONFIG); + master->mctrl_config = reg; /* Master core's registration */ ret = i3c_master_get_free_addr(m, 0); if (ret < 0) @@ -766,6 +834,8 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master, u32 reg; int ret, i; + svc_i3c_master_flush_fifo(master); + while (true) { /* Enter/proceed with DAA */ writel(SVC_I3C_MCTRL_REQUEST_PROC_DAA | @@ -877,7 +947,7 @@ static int svc_i3c_update_ibirules(struct svc_i3c_master *master) /* Create the IBIRULES register for both cases */ i3c_bus_for_each_i3cdev(&master->base.bus, dev) { - if (I3C_BCR_DEVICE_ROLE(dev->info.bcr) == I3C_BCR_I3C_MASTER) + if (!(dev->info.bcr & I3C_BCR_IBI_REQ_CAP)) continue; if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD) { @@ -944,12 +1014,27 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m) goto rpm_out; } - /* Register all devices who participated to the core */ - for (i = 0; i < dev_nb; i++) { - ret = i3c_master_add_i3c_dev_locked(m, addrs[i]); - if (ret) - goto rpm_out; - } + /* + * Register all devices who participated to the core + * + * If two devices (A and B) are detected in DAA and address 0xa is assigned to + * device A and 0xb to device B, a failure in i3c_master_add_i3c_dev_locked() + * for device A (addr: 0xa) could prevent device B (addr: 0xb) from being + * registered on the bus. The I3C stack might still consider 0xb a free + * address. If a subsequent Hotjoin occurs, 0xb might be assigned to Device A, + * causing both devices A and B to use the same address 0xb, violating the I3C + * specification. + * + * The return value for i3c_master_add_i3c_dev_locked() should not be checked + * because subsequent steps will scan the entire I3C bus, independent of + * whether i3c_master_add_i3c_dev_locked() returns success. + * + * If device A registration fails, there is still a chance to register device + * B. i3c_master_add_i3c_dev_locked() can reset DAA if a failure occurs while + * retrieving device information. 
+ */ + for (i = 0; i < dev_nb; i++) + i3c_master_add_i3c_dev_locked(m, addrs[i]); /* Configure IBI auto-rules */ ret = svc_i3c_update_ibirules(master); @@ -1475,6 +1560,7 @@ static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev) return ret; } + master->enabled_events++; svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART); return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); @@ -1486,7 +1572,9 @@ static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev) struct svc_i3c_master *master = to_svc_i3c_master(m); int ret; - svc_i3c_master_disable_interrupts(master); + master->enabled_events--; + if (!master->enabled_events) + svc_i3c_master_disable_interrupts(master); ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR); @@ -1496,6 +1584,39 @@ static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev) return ret; } +static int svc_i3c_master_enable_hotjoin(struct i3c_master_controller *m) +{ + struct svc_i3c_master *master = to_svc_i3c_master(m); + int ret; + + ret = pm_runtime_resume_and_get(master->dev); + if (ret < 0) { + dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__); + return ret; + } + + master->enabled_events |= SVC_I3C_EVENT_HOTJOIN; + + svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART); + + return 0; +} + +static int svc_i3c_master_disable_hotjoin(struct i3c_master_controller *m) +{ + struct svc_i3c_master *master = to_svc_i3c_master(m); + + master->enabled_events &= ~SVC_I3C_EVENT_HOTJOIN; + + if (!master->enabled_events) + svc_i3c_master_disable_interrupts(master); + + pm_runtime_mark_last_busy(master->dev); + pm_runtime_put_autosuspend(master->dev); + + return 0; +} + static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot) { @@ -1522,6 +1643,9 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = { .recycle_ibi_slot = svc_i3c_master_recycle_ibi_slot, .enable_ibi = svc_i3c_master_enable_ibi, .disable_ibi = svc_i3c_master_disable_ibi, + .enable_hotjoin = svc_i3c_master_enable_hotjoin, + .disable_hotjoin = svc_i3c_master_disable_hotjoin, + .set_speed = svc_i3c_master_set_speed, }; static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master) @@ -1639,8 +1763,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev) rpm_disable: pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); - pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); err_disable_clks: svc_i3c_master_unprepare_clks(master); @@ -1651,11 +1775,9 @@ err_disable_clks: static int svc_i3c_master_remove(struct platform_device *pdev) { struct svc_i3c_master *master = platform_get_drvdata(pdev); - int ret; - ret = i3c_master_unregister(&master->base); - if (ret) - return ret; + cancel_work_sync(&master->hj_work); + i3c_master_unregister(&master->base); pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index bb3d10099ba4..ae07dc018e66 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #define INTEL_IDLE_VERSION "0.5.1" @@ -187,12 +188,12 @@ static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev, int ret; if (smt_active) - wrmsrl(MSR_IA32_SPEC_CTRL, 0); + native_wrmsrl(MSR_IA32_SPEC_CTRL, 0); ret = __intel_idle(dev, drv, index); if (smt_active) - wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl); + 
native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl); return ret; } @@ -1583,6 +1584,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) if (intel_idle_state_needs_timer_stop(state)) state->flags |= CPUIDLE_FLAG_TIMER_STOP; + if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + mark_tsc_unstable("TSC halts in idle"); + state->enter = intel_idle; state->enter_s2idle = intel_idle_s2idle; } diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c index dfb8e2e5bdf5..f063226a42f3 100644 --- a/drivers/iio/accel/adis16201.c +++ b/drivers/iio/accel/adis16201.c @@ -211,9 +211,9 @@ static const struct iio_chan_spec adis16201_channels[] = { BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12), ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X, - BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), + BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12), ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y, - BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14), + BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12), IIO_CHAN_SOFT_TIMESTAMP(7) }; diff --git a/drivers/iio/accel/adxl355_core.c b/drivers/iio/accel/adxl355_core.c index 4bc648eac8b2..e73573cd773a 100644 --- a/drivers/iio/accel/adxl355_core.c +++ b/drivers/iio/accel/adxl355_core.c @@ -175,7 +175,7 @@ struct adxl355_data { u8 transf_buf[3]; struct { u8 buf[14]; - s64 ts; + aligned_s64 ts; } buffer; } __aligned(IIO_DMA_MINALIGN); }; diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c index f1a41b92543a..af0ad4e5e4a5 100644 --- a/drivers/iio/accel/adxl367.c +++ b/drivers/iio/accel/adxl367.c @@ -622,18 +622,14 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr) if (ret) return ret; + st->odr = odr; + /* Activity timers depend on ODR */ ret = _adxl367_set_act_time_ms(st, st->act_time_ms); if (ret) return ret; - ret = _adxl367_set_inact_time_ms(st, st->inact_time_ms); - if (ret) - return ret; - - st->odr = odr; - - return 0; + return _adxl367_set_inact_time_ms(st, st->inact_time_ms); } static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr) diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 3ba28c2ff68a..ad7b7770d526 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -711,7 +711,7 @@ static int mma8452_write_raw(struct iio_dev *indio_dev, int val, int val2, long mask) { struct mma8452_data *data = iio_priv(indio_dev); - int i, ret; + int i, j, ret; ret = iio_device_claim_direct_mode(indio_dev); if (ret) @@ -771,14 +771,18 @@ static int mma8452_write_raw(struct iio_dev *indio_dev, break; case IIO_CHAN_INFO_OVERSAMPLING_RATIO: - ret = mma8452_get_odr_index(data); + j = mma8452_get_odr_index(data); for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) { - if (mma8452_os_ratio[i][ret] == val) { + if (mma8452_os_ratio[i][j] == val) { ret = mma8452_set_power_mode(data, i); break; } } + if (i == ARRAY_SIZE(mma8452_os_ratio)) { + ret = -EINVAL; + break; + } break; default: ret = -EINVAL; diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c index 2fded3759171..9b5c36f2322f 100644 --- a/drivers/iio/accel/msa311.c +++ b/drivers/iio/accel/msa311.c @@ -595,23 +595,25 @@ static int msa311_read_raw_data(struct iio_dev *indio_dev, __le16 axis; int err; - err = pm_runtime_resume_and_get(dev); - if (err) - return err; - err = iio_device_claim_direct_mode(indio_dev); if (err) return err; + err = pm_runtime_resume_and_get(dev); + if (err) { + 
iio_device_release_direct_mode(indio_dev); + return err; + } + mutex_lock(&msa311->lock); err = msa311_get_axis(msa311, chan, &axis); mutex_unlock(&msa311->lock); - iio_device_release_direct_mode(indio_dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); + iio_device_release_direct_mode(indio_dev); + if (err) { dev_err(dev, "can't get axis %s (%pe)\n", chan->datasheet_name, ERR_PTR(err)); @@ -757,10 +759,6 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) unsigned int odr; int err; - err = pm_runtime_resume_and_get(dev); - if (err) - return err; - /* * Sampling frequency changing is prohibited when buffer mode is * enabled, because sometimes MSA311 chip returns outliers during @@ -770,6 +768,12 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) if (err) return err; + err = pm_runtime_resume_and_get(dev); + if (err) { + iio_device_release_direct_mode(indio_dev); + return err; + } + err = -EINVAL; for (odr = 0; odr < ARRAY_SIZE(msa311_odr_table); odr++) if (val == msa311_odr_table[odr].integral && @@ -780,11 +784,11 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2) break; } - iio_device_release_direct_mode(indio_dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); + iio_device_release_direct_mode(indio_dev); + if (err) dev_err(dev, "can't update frequency (%pe)\n", ERR_PTR(err)); diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 0d0884d3b246..307a607bf56c 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -146,7 +146,11 @@ struct ad7124_chip_info { struct ad7124_channel_config { bool live; unsigned int cfg_slot; - /* Following fields are used to compare equality. */ + /* + * Following fields are used to compare for equality. If you + * make adaptations in it, you most likely also have to adapt + * ad7124_find_similar_live_cfg(), too. + */ struct_group(config_props, enum ad7124_ref_sel refsel; bool bipolar; @@ -333,15 +337,38 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_ struct ad7124_channel_config *cfg) { struct ad7124_channel_config *cfg_aux; - ptrdiff_t cmp_size; int i; - cmp_size = sizeof_field(struct ad7124_channel_config, config_props); + /* + * This is just to make sure that the comparison is adapted after + * struct ad7124_channel_config was changed. + */ + static_assert(sizeof_field(struct ad7124_channel_config, config_props) == + sizeof(struct { + enum ad7124_ref_sel refsel; + bool bipolar; + bool buf_positive; + bool buf_negative; + unsigned int vref_mv; + unsigned int pga_bits; + unsigned int odr; + unsigned int odr_sel_bits; + unsigned int filter_type; + })); + for (i = 0; i < st->num_channels; i++) { cfg_aux = &st->channels[i].cfg; if (cfg_aux->live && - !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size)) + cfg->refsel == cfg_aux->refsel && + cfg->bipolar == cfg_aux->bipolar && + cfg->buf_positive == cfg_aux->buf_positive && + cfg->buf_negative == cfg_aux->buf_negative && + cfg->vref_mv == cfg_aux->vref_mv && + cfg->pga_bits == cfg_aux->pga_bits && + cfg->odr == cfg_aux->odr && + cfg->odr_sel_bits == cfg_aux->odr_sel_bits && + cfg->filter_type == cfg_aux->filter_type) return cfg_aux; } @@ -931,6 +958,9 @@ static int ad7124_setup(struct ad7124_state *st) * set all channels to this default value. */ ad7124_set_channel_odr(st, i, 10); + + /* Disable all channels to prevent unintended conversions. 
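
Returning to ad7124_find_similar_live_cfg() above: the static_assert() is what keeps the hand-written, field-by-field comparison honest when struct ad7124_channel_config grows. A stripped-down version of the same pattern, with hypothetical names and the same caveat that a new field only trips the assert if it changes the struct's size:

  struct example_cfg {
          unsigned int refsel;
          bool bipolar;
  };

  static bool example_cfg_equal(const struct example_cfg *a, const struct example_cfg *b)
  {
          /*
           * Fires at build time if example_cfg gains a size-changing field
           * that is not mirrored here, a reminder to update the comparison too.
           */
          static_assert(sizeof(struct example_cfg) ==
                        sizeof(struct { unsigned int refsel; bool bipolar; }));

          return a->refsel == b->refsel && a->bipolar == b->bipolar;
  }
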
*/ + ad_sd_write_reg(&st->sd, AD7124_CHANNEL(i), 2, 0); } return ret; diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c index 98648c679a55..2ace3aafe497 100644 --- a/drivers/iio/adc/ad7266.c +++ b/drivers/iio/adc/ad7266.c @@ -44,7 +44,7 @@ struct ad7266_state { */ struct { __be16 sample[2]; - s64 timestamp; + aligned_s64 timestamp; } data __aligned(IIO_DMA_MINALIGN); }; diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c index 287a0591533b..67c96572cecc 100644 --- a/drivers/iio/adc/ad7606_spi.c +++ b/drivers/iio/adc/ad7606_spi.c @@ -127,7 +127,7 @@ static int ad7606_spi_reg_read(struct ad7606_state *st, unsigned int addr) { .tx_buf = &st->d16[0], .len = 2, - .cs_change = 0, + .cs_change = 1, }, { .rx_buf = &st->d16[1], .len = 2, diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c index 70a25949142c..967f06cd3f94 100644 --- a/drivers/iio/adc/ad7768-1.c +++ b/drivers/iio/adc/ad7768-1.c @@ -142,7 +142,7 @@ static const struct iio_chan_spec ad7768_channels[] = { .channel = 0, .scan_index = 0, .scan_type = { - .sign = 'u', + .sign = 's', .realbits = 24, .storagebits = 32, .shift = 8, @@ -169,7 +169,7 @@ struct ad7768_state { union { struct { __be32 chan; - s64 timestamp; + aligned_s64 timestamp; } scan; __be32 d32; u8 d8[2]; @@ -370,12 +370,11 @@ static int ad7768_read_raw(struct iio_dev *indio_dev, return ret; ret = ad7768_scan_direct(indio_dev); - if (ret >= 0) - *val = ret; iio_device_release_direct_mode(indio_dev); if (ret < 0) return ret; + *val = sign_extend32(ret, chan->scan_type.realbits - 1); return IIO_VAL_INT; diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c index a813fe04787c..97b6774e7f58 100644 --- a/drivers/iio/adc/ad7780.c +++ b/drivers/iio/adc/ad7780.c @@ -152,7 +152,7 @@ static int ad7780_write_raw(struct iio_dev *indio_dev, switch (m) { case IIO_CHAN_INFO_SCALE: - if (val != 0) + if (val != 0 || val2 == 0) return -EINVAL; vref = st->int_vref_mv * 1000000LL; diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c index 9d6bf6d0927a..709ce2a50097 100644 --- a/drivers/iio/adc/ad7923.c +++ b/drivers/iio/adc/ad7923.c @@ -48,7 +48,7 @@ struct ad7923_state { struct spi_device *spi; - struct spi_transfer ring_xfer[5]; + struct spi_transfer ring_xfer[9]; struct spi_transfer scan_single_xfer[2]; struct spi_message ring_msg; struct spi_message scan_single_msg; @@ -64,7 +64,7 @@ struct ad7923_state { * Length = 8 channels + 4 extra for 8 byte timestamp */ __be16 rx_buf[12] __aligned(IIO_DMA_MINALIGN); - __be16 tx_buf[4]; + __be16 tx_buf[8]; }; struct ad7923_chip_info { diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index 3ad5678f2613..7bd180d48cb6 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -329,7 +329,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = { #define AT91_HWFIFO_MAX_SIZE_STR "128" #define AT91_HWFIFO_MAX_SIZE 128 -#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \ +#define AT91_SAMA_CHAN_SINGLE(index, num, addr, rbits) \ { \ .type = IIO_VOLTAGE, \ .channel = num, \ @@ -337,7 +337,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .scan_index = index, \ .scan_type = { \ .sign = 'u', \ - .realbits = 14, \ + .realbits = rbits, \ .storagebits = 16, \ }, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ @@ -350,7 +350,13 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .indexed = 1, \ } -#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \ +#define 
AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \ + AT91_SAMA_CHAN_SINGLE(index, num, addr, 14) + +#define AT91_SAMA7G5_CHAN_SINGLE(index, num, addr) \ + AT91_SAMA_CHAN_SINGLE(index, num, addr, 16) + +#define AT91_SAMA_CHAN_DIFF(index, num, num2, addr, rbits) \ { \ .type = IIO_VOLTAGE, \ .differential = 1, \ @@ -360,7 +366,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .scan_index = index, \ .scan_type = { \ .sign = 's', \ - .realbits = 14, \ + .realbits = rbits, \ .storagebits = 16, \ }, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ @@ -373,6 +379,12 @@ static const struct at91_adc_reg_layout sama7g5_layout = { .indexed = 1, \ } +#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \ + AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 14) + +#define AT91_SAMA7G5_CHAN_DIFF(index, num, num2, addr) \ + AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 16) + #define AT91_SAMA5D2_CHAN_TOUCH(num, name, mod) \ { \ .type = IIO_POSITIONRELATIVE, \ @@ -666,30 +678,30 @@ static const struct iio_chan_spec at91_sama5d2_adc_channels[] = { }; static const struct iio_chan_spec at91_sama7g5_adc_channels[] = { - AT91_SAMA5D2_CHAN_SINGLE(0, 0, 0x60), - AT91_SAMA5D2_CHAN_SINGLE(1, 1, 0x64), - AT91_SAMA5D2_CHAN_SINGLE(2, 2, 0x68), - AT91_SAMA5D2_CHAN_SINGLE(3, 3, 0x6c), - AT91_SAMA5D2_CHAN_SINGLE(4, 4, 0x70), - AT91_SAMA5D2_CHAN_SINGLE(5, 5, 0x74), - AT91_SAMA5D2_CHAN_SINGLE(6, 6, 0x78), - AT91_SAMA5D2_CHAN_SINGLE(7, 7, 0x7c), - AT91_SAMA5D2_CHAN_SINGLE(8, 8, 0x80), - AT91_SAMA5D2_CHAN_SINGLE(9, 9, 0x84), - AT91_SAMA5D2_CHAN_SINGLE(10, 10, 0x88), - AT91_SAMA5D2_CHAN_SINGLE(11, 11, 0x8c), - AT91_SAMA5D2_CHAN_SINGLE(12, 12, 0x90), - AT91_SAMA5D2_CHAN_SINGLE(13, 13, 0x94), - AT91_SAMA5D2_CHAN_SINGLE(14, 14, 0x98), - AT91_SAMA5D2_CHAN_SINGLE(15, 15, 0x9c), - AT91_SAMA5D2_CHAN_DIFF(16, 0, 1, 0x60), - AT91_SAMA5D2_CHAN_DIFF(17, 2, 3, 0x68), - AT91_SAMA5D2_CHAN_DIFF(18, 4, 5, 0x70), - AT91_SAMA5D2_CHAN_DIFF(19, 6, 7, 0x78), - AT91_SAMA5D2_CHAN_DIFF(20, 8, 9, 0x80), - AT91_SAMA5D2_CHAN_DIFF(21, 10, 11, 0x88), - AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90), - AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98), + AT91_SAMA7G5_CHAN_SINGLE(0, 0, 0x60), + AT91_SAMA7G5_CHAN_SINGLE(1, 1, 0x64), + AT91_SAMA7G5_CHAN_SINGLE(2, 2, 0x68), + AT91_SAMA7G5_CHAN_SINGLE(3, 3, 0x6c), + AT91_SAMA7G5_CHAN_SINGLE(4, 4, 0x70), + AT91_SAMA7G5_CHAN_SINGLE(5, 5, 0x74), + AT91_SAMA7G5_CHAN_SINGLE(6, 6, 0x78), + AT91_SAMA7G5_CHAN_SINGLE(7, 7, 0x7c), + AT91_SAMA7G5_CHAN_SINGLE(8, 8, 0x80), + AT91_SAMA7G5_CHAN_SINGLE(9, 9, 0x84), + AT91_SAMA7G5_CHAN_SINGLE(10, 10, 0x88), + AT91_SAMA7G5_CHAN_SINGLE(11, 11, 0x8c), + AT91_SAMA7G5_CHAN_SINGLE(12, 12, 0x90), + AT91_SAMA7G5_CHAN_SINGLE(13, 13, 0x94), + AT91_SAMA7G5_CHAN_SINGLE(14, 14, 0x98), + AT91_SAMA7G5_CHAN_SINGLE(15, 15, 0x9c), + AT91_SAMA7G5_CHAN_DIFF(16, 0, 1, 0x60), + AT91_SAMA7G5_CHAN_DIFF(17, 2, 3, 0x68), + AT91_SAMA7G5_CHAN_DIFF(18, 4, 5, 0x70), + AT91_SAMA7G5_CHAN_DIFF(19, 6, 7, 0x78), + AT91_SAMA7G5_CHAN_DIFF(20, 8, 9, 0x80), + AT91_SAMA7G5_CHAN_DIFF(21, 10, 11, 0x88), + AT91_SAMA7G5_CHAN_DIFF(22, 12, 13, 0x90), + AT91_SAMA7G5_CHAN_DIFF(23, 14, 15, 0x98), IIO_CHAN_SOFT_TIMESTAMP(24), AT91_SAMA5D2_CHAN_TEMP(AT91_SAMA7G5_ADC_TEMP_CHANNEL, "temp", 0xdc), }; diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 366e252ebeb0..ff61b7649113 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -985,7 +985,7 @@ static int at91_ts_register(struct iio_dev *idev, return ret; err: - input_free_device(st->ts_input); + input_free_device(input); return ret; } diff 
--git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c index 97d162a3cba4..49a2588e7431 100644 --- a/drivers/iio/adc/dln2-adc.c +++ b/drivers/iio/adc/dln2-adc.c @@ -483,7 +483,7 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct { __le16 values[DLN2_ADC_MAX_CHANNELS]; - int64_t timestamp_space; + aligned_s64 timestamp_space; } data; struct dln2_adc_get_all_vals dev_data; struct dln2_adc *dln2 = iio_priv(indio_dev); diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c index 57e2f3b082ca..22766d56626d 100644 --- a/drivers/iio/adc/rockchip_saradc.c +++ b/drivers/iio/adc/rockchip_saradc.c @@ -607,6 +607,8 @@ static irqreturn_t rockchip_saradc_trigger_handler(int irq, void *p) int ret; int i, j = 0; + memset(&data, 0, sizeof(data)); + mutex_lock(&info->lock); for_each_set_bit(i, i_dev->active_scan_mask, i_dev->masklength) { diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c index 4ca62121f0d1..c6004f2b5d3d 100644 --- a/drivers/iio/adc/ti-ads124s08.c +++ b/drivers/iio/adc/ti-ads124s08.c @@ -183,9 +183,9 @@ static int ads124s_reset(struct iio_dev *indio_dev) struct ads124s_private *priv = iio_priv(indio_dev); if (priv->reset_gpio) { - gpiod_set_value(priv->reset_gpio, 0); + gpiod_set_value_cansleep(priv->reset_gpio, 0); udelay(200); - gpiod_set_value(priv->reset_gpio, 1); + gpiod_set_value_cansleep(priv->reset_gpio, 1); } else { return ads124s_write_cmd(indio_dev, ADS124S08_CMD_RESET); } diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index ef06a897421a..66a3b67019b8 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c @@ -382,7 +382,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *indio_dev = pf->indio_dev; /* Ensure naturally aligned timestamp */ - u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8); + u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)] __aligned(8) = { }; int i, j = 0; for (i = 0; i < indio_dev->masklength; i++) { diff --git a/drivers/iio/chemical/sps30.c b/drivers/iio/chemical/sps30.c index 814ce0aad1cc..4085a36cd1db 100644 --- a/drivers/iio/chemical/sps30.c +++ b/drivers/iio/chemical/sps30.c @@ -108,7 +108,7 @@ static irqreturn_t sps30_trigger_handler(int irq, void *p) int ret; struct { s32 data[4]; /* PM1, PM2P5, PM4, PM10 */ - s64 ts; + aligned_s64 ts; } scan; mutex_lock(&state->lock); diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c index a492e8f2fc0f..130bda620222 100644 --- a/drivers/iio/dac/ad3552r.c +++ b/drivers/iio/dac/ad3552r.c @@ -703,6 +703,12 @@ static int ad3552r_reset(struct ad3552r_desc *dac) return ret; } + /* Clear reset error flag, see ad3552r manual, rev B table 38. 
*/ + ret = ad3552r_write_reg(dac, AD3552R_REG_ADDR_ERR_STATUS, + AD3552R_MASK_RESET_STATUS); + if (ret) + return ret; + return ad3552r_update_reg_field(dac, addr_mask_map[AD3552R_ADDR_ASCENSION][0], addr_mask_map[AD3552R_ADDR_ASCENSION][1], diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c index 9b2f99449a82..bc85fe6610c1 100644 --- a/drivers/iio/dummy/iio_simple_dummy_buffer.c +++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c @@ -48,7 +48,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p) int i = 0, j; u16 *data; - data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); + data = kzalloc(indio_dev->scan_bytes, GFP_KERNEL); if (!data) goto done; diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c index 68de45fe21b4..c7f5911f9040 100644 --- a/drivers/iio/filter/admv8818.c +++ b/drivers/iio/filter/admv8818.c @@ -527,21 +527,15 @@ static int admv8818_init(struct admv8818_state *st) struct spi_device *spi = st->spi; unsigned int chip_id; - ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A, - ADMV8818_SOFTRESET_N_MSK | - ADMV8818_SOFTRESET_MSK, - FIELD_PREP(ADMV8818_SOFTRESET_N_MSK, 1) | - FIELD_PREP(ADMV8818_SOFTRESET_MSK, 1)); + ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A, + ADMV8818_SOFTRESET_N_MSK | ADMV8818_SOFTRESET_MSK); if (ret) { dev_err(&spi->dev, "ADMV8818 Soft Reset failed.\n"); return ret; } - ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A, - ADMV8818_SDOACTIVE_N_MSK | - ADMV8818_SDOACTIVE_MSK, - FIELD_PREP(ADMV8818_SDOACTIVE_N_MSK, 1) | - FIELD_PREP(ADMV8818_SDOACTIVE_MSK, 1)); + ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A, + ADMV8818_SDOACTIVE_N_MSK | ADMV8818_SDOACTIVE_MSK); if (ret) { dev_err(&spi->dev, "ADMV8818 SDO Enable failed.\n"); return ret; diff --git a/drivers/iio/gyro/fxas21002c_core.c b/drivers/iio/gyro/fxas21002c_core.c index a36d71d9e3ea..e38fd7dcecfe 100644 --- a/drivers/iio/gyro/fxas21002c_core.c +++ b/drivers/iio/gyro/fxas21002c_core.c @@ -730,14 +730,21 @@ static irqreturn_t fxas21002c_trigger_handler(int irq, void *p) int ret; mutex_lock(&data->lock); - ret = regmap_bulk_read(data->regmap, FXAS21002C_REG_OUT_X_MSB, - data->buffer, CHANNEL_SCAN_MAX * sizeof(s16)); + ret = fxas21002c_pm_get(data); if (ret < 0) goto out_unlock; + ret = regmap_bulk_read(data->regmap, FXAS21002C_REG_OUT_X_MSB, + data->buffer, CHANNEL_SCAN_MAX * sizeof(s16)); + if (ret < 0) + goto out_pm_put; + iio_push_to_buffers_with_timestamp(indio_dev, data->buffer, data->timestamp); +out_pm_put: + fxas21002c_pm_put(data); + out_unlock: mutex_unlock(&data->lock); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h index 3d91469beccb..20fb1c33b90a 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h @@ -360,6 +360,7 @@ struct inv_icm42600_state { typedef int (*inv_icm42600_bus_setup)(struct inv_icm42600_state *); extern const struct regmap_config inv_icm42600_regmap_config; +extern const struct regmap_config inv_icm42600_spi_regmap_config; extern const struct dev_pm_ops inv_icm42600_pm_ops; const struct iio_mount_matrix * diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c index ca85fccc9839..9dec4ad38c0d 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c @@ -43,6 +43,17 @@ const struct regmap_config inv_icm42600_regmap_config = 
{ }; EXPORT_SYMBOL_GPL(inv_icm42600_regmap_config); +/* define specific regmap for SPI not supporting burst write */ +const struct regmap_config inv_icm42600_spi_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x4FFF, + .ranges = inv_icm42600_regmap_ranges, + .num_ranges = ARRAY_SIZE(inv_icm42600_regmap_ranges), + .use_single_write = true, +}; +EXPORT_SYMBOL_GPL(inv_icm42600_spi_regmap_config); + struct inv_icm42600_hw { uint8_t whoami; const char *name; @@ -709,6 +720,8 @@ out_unlock: static int __maybe_unused inv_icm42600_resume(struct device *dev) { struct inv_icm42600_state *st = dev_get_drvdata(dev); + struct inv_icm42600_timestamp *gyro_ts = iio_priv(st->indio_gyro); + struct inv_icm42600_timestamp *accel_ts = iio_priv(st->indio_accel); int ret; mutex_lock(&st->lock); @@ -729,9 +742,12 @@ static int __maybe_unused inv_icm42600_resume(struct device *dev) goto out_unlock; /* restore FIFO data streaming */ - if (st->fifo.on) + if (st->fifo.on) { + inv_icm42600_timestamp_reset(gyro_ts); + inv_icm42600_timestamp_reset(accel_ts); ret = regmap_write(st->map, INV_ICM42600_REG_FIFO_CONFIG, INV_ICM42600_FIFO_CONFIG_STREAM); + } out_unlock: mutex_unlock(&st->lock); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c index e6305e5fa975..4441c3bb9601 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c @@ -59,7 +59,8 @@ static int inv_icm42600_probe(struct spi_device *spi) return -EINVAL; chip = (uintptr_t)match; - regmap = devm_regmap_init_spi(spi, &inv_icm42600_regmap_config); + /* use SPI specific regmap */ + regmap = devm_regmap_init_spi(spi, &inv_icm42600_spi_regmap_config); if (IS_ERR(regmap)) return PTR_ERR(regmap); diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c index b10c0dcac0bb..c9c3a276378c 100644 --- a/drivers/iio/imu/kmx61.c +++ b/drivers/iio/imu/kmx61.c @@ -1192,7 +1192,7 @@ static irqreturn_t kmx61_trigger_handler(int irq, void *p) struct kmx61_data *data = kmx61_get_data(indio_dev); int bit, ret, i = 0; u8 base; - s16 buffer[8]; + s16 buffer[8] = { }; if (indio_dev == data->acc_indio_dev) base = KMX61_ACC_XOUT_L; diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index e49f2d120ed3..f5a68059f0a6 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c @@ -370,6 +370,9 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw) if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK)) return 0; + if (!pattern_len) + pattern_len = ST_LSM6DSX_SAMPLE_SIZE; + fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) * ST_LSM6DSX_CHAN_SIZE; fifo_len = (fifo_len / pattern_len) * pattern_len; @@ -587,6 +590,9 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw) if (!fifo_len) return 0; + if (!pattern_len) + pattern_len = ST_LSM6DSX_TAGGED_SAMPLE_SIZE; + for (read_len = 0; read_len < fifo_len; read_len += pattern_len) { err = st_lsm6dsx_read_block(hw, ST_LSM6DSX_REG_FIFO_OUT_TAG_ADDR, diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 81344ceac951..5c210f48bd9c 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -276,7 +276,7 @@ struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, return ERR_PTR(-ENODEV); } - chan = __fwnode_iio_channel_get_by_name(fwnode, name); + chan = __fwnode_iio_channel_get_by_name(parent, name); if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) { 
fwnode_handle_put(parent); return chan; @@ -513,7 +513,7 @@ struct iio_channel *iio_channel_get_all(struct device *dev) return chans; error_free_chans: - for (i = 0; i < nummaps; i++) + for (i = 0; i < mapind; i++) iio_device_put(chans[i].indio_dev); kfree(chans); error_ret: diff --git a/drivers/iio/light/al3010.c b/drivers/iio/light/al3010.c index ce5363845b22..c1c37e5a8515 100644 --- a/drivers/iio/light/al3010.c +++ b/drivers/iio/light/al3010.c @@ -87,7 +87,12 @@ static int al3010_init(struct al3010_data *data) int ret; ret = al3010_set_pwr(data->client, true); + if (ret < 0) + return ret; + ret = devm_add_action_or_reset(&data->client->dev, + al3010_set_pwr_off, + data); if (ret < 0) return ret; @@ -191,12 +196,6 @@ static int al3010_probe(struct i2c_client *client, return ret; } - ret = devm_add_action_or_reset(&client->dev, - al3010_set_pwr_off, - data); - if (ret < 0) - return ret; - return devm_iio_device_register(&client->dev, indio_dev); } diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c index 2307fc531752..895b0c9c6d1d 100644 --- a/drivers/iio/light/as73211.c +++ b/drivers/iio/light/as73211.c @@ -154,6 +154,12 @@ struct as73211_data { BIT(AS73211_SCAN_INDEX_TEMP) | \ AS73211_SCAN_MASK_COLOR) +static const unsigned long as73211_scan_masks[] = { + AS73211_SCAN_MASK_COLOR, + AS73211_SCAN_MASK_ALL, + 0 +}; + static const struct iio_chan_spec as73211_channels[] = { { .type = IIO_TEMP, @@ -602,9 +608,12 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p) /* AS73211 starts reading at address 2 */ ret = i2c_master_recv(data->client, - (char *)&scan.chan[1], 3 * sizeof(scan.chan[1])); + (char *)&scan.chan[0], 3 * sizeof(scan.chan[0])); if (ret < 0) goto done; + + /* Avoid pushing uninitialized data */ + scan.chan[3] = 0; } if (data_result) { @@ -612,9 +621,15 @@ static irqreturn_t as73211_trigger_handler(int irq __always_unused, void *p) * Saturate all channels (in case of overflows). Temperature channel * is not affected by overflows. 
*/ - scan.chan[1] = cpu_to_le16(U16_MAX); - scan.chan[2] = cpu_to_le16(U16_MAX); - scan.chan[3] = cpu_to_le16(U16_MAX); + if (*indio_dev->active_scan_mask == AS73211_SCAN_MASK_ALL) { + scan.chan[1] = cpu_to_le16(U16_MAX); + scan.chan[2] = cpu_to_le16(U16_MAX); + scan.chan[3] = cpu_to_le16(U16_MAX); + } else { + scan.chan[0] = cpu_to_le16(U16_MAX); + scan.chan[1] = cpu_to_le16(U16_MAX); + scan.chan[2] = cpu_to_le16(U16_MAX); + } } iio_push_to_buffers_with_timestamp(indio_dev, &scan, iio_get_time_ns(indio_dev)); @@ -684,6 +699,7 @@ static int as73211_probe(struct i2c_client *client) indio_dev->channels = as73211_channels; indio_dev->num_channels = ARRAY_SIZE(as73211_channels); indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->available_scan_masks = as73211_scan_masks; ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_OSR); if (ret < 0) diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 74a1ccda8b9c..49f293232a70 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -1631,6 +1631,8 @@ static const struct acpi_device_id ltr_acpi_match[] = { { "LTER0501", ltr501 }, { "LTER0559", ltr559 }, { "LTER0301", ltr301 }, + /* https://www.catalog.update.microsoft.com/Search.aspx?q=lter0303 */ + { "LTER0303", ltr303 }, { }, }; MODULE_DEVICE_TABLE(acpi, ltr_acpi_match); diff --git a/drivers/iio/light/vcnl4035.c b/drivers/iio/light/vcnl4035.c index a23c415fcb7a..db954c6faa2f 100644 --- a/drivers/iio/light/vcnl4035.c +++ b/drivers/iio/light/vcnl4035.c @@ -105,7 +105,7 @@ static irqreturn_t vcnl4035_trigger_consumer_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct vcnl4035_data *data = iio_priv(indio_dev); /* Ensure naturally aligned timestamp */ - u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8); + u8 buffer[ALIGN(sizeof(u16), sizeof(s64)) + sizeof(s64)] __aligned(8) = { }; int ret; ret = regmap_read(data->regmap, VCNL4035_ALS_DATA, (int *)buffer); diff --git a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c index 801c760feb4d..1cbe3ca79747 100644 --- a/drivers/iio/magnetometer/yamaha-yas530.c +++ b/drivers/iio/magnetometer/yamaha-yas530.c @@ -372,6 +372,7 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y u8 data[8]; u16 xy1y2[3]; s32 h[3], s[3]; + int half_range = BIT(13); int i, ret; mutex_lock(&yas5xx->lock); @@ -406,13 +407,13 @@ static int yas537_measure(struct yas5xx *yas5xx, u16 *t, u16 *x, u16 *y1, u16 *y /* The second version of YAS537 needs to include calibration coefficients */ if (yas5xx->version == YAS537_VERSION_1) { for (i = 0; i < 3; i++) - s[i] = xy1y2[i] - BIT(13); - h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / BIT(13); - h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / BIT(13); - h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / BIT(13); + s[i] = xy1y2[i] - half_range; + h[0] = (c->k * (128 * s[0] + c->a2 * s[1] + c->a3 * s[2])) / half_range; + h[1] = (c->k * (c->a4 * s[0] + c->a5 * s[1] + c->a6 * s[2])) / half_range; + h[2] = (c->k * (c->a7 * s[0] + c->a8 * s[1] + c->a9 * s[2])) / half_range; for (i = 0; i < 3; i++) { - clamp_val(h[i], -BIT(13), BIT(13) - 1); - xy1y2[i] = h[i] + BIT(13); + h[i] = clamp(h[i], -half_range, half_range - 1); + xy1y2[i] = h[i] + half_range; } } diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index 67119a9b95fc..fdb1b765206b 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c @@ -586,6 +586,8 
@@ static int zpa2326_fill_sample_buffer(struct iio_dev *indio_dev, } sample; int err; + memset(&sample, 0, sizeof(sample)); + if (test_bit(0, indio_dev->active_scan_mask)) { /* Get current pressure from hardware FIFO. */ err = zpa2326_dequeue_pressure(indio_dev, &sample.pressure); diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c index c28a7a6dea5f..555a61e2f3fd 100644 --- a/drivers/iio/temperature/maxim_thermocouple.c +++ b/drivers/iio/temperature/maxim_thermocouple.c @@ -121,9 +121,9 @@ static const struct maxim_thermocouple_chip maxim_thermocouple_chips[] = { struct maxim_thermocouple_data { struct spi_device *spi; const struct maxim_thermocouple_chip *chip; + char tc_type; u8 buffer[16] __aligned(IIO_DMA_MINALIGN); - char tc_type; }; static int maxim_thermocouple_read(struct maxim_thermocouple_data *data, diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 067d7f42871f..bb3c361bd8d4 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -72,6 +72,8 @@ static const char * const cma_events[] = { static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, enum ib_gid_type gid_type); +static void cma_netevent_work_handler(struct work_struct *_work); + const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) { size_t index = event; @@ -994,6 +996,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); id_priv->id.route.addr.dev_addr.net = get_net(net); id_priv->seq_num &= 0x00ffffff; + INIT_WORK(&id_priv->id.net_work, cma_netevent_work_handler); rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID); if (parent) @@ -5186,7 +5189,6 @@ static int cma_netevent_callback(struct notifier_block *self, if (!memcmp(current_id->id.route.addr.dev_addr.dst_dev_addr, neigh->ha, ETH_ALEN)) continue; - INIT_WORK(&current_id->id.net_work, cma_netevent_work_handler); cma_id_get(current_id); queue_work(cma_wq, &current_id->id.net_work); } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 291ded20934c..a5ba2cb3031f 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -543,6 +543,8 @@ static struct class ib_class = { static void rdma_init_coredev(struct ib_core_device *coredev, struct ib_device *dev, struct net *net) { + bool is_full_dev = &dev->coredev == coredev; + /* This BUILD_BUG_ON is intended to catch layout change * of union of ib_core_device and device. * dev must be the first element as ib_core and providers @@ -554,6 +556,13 @@ static void rdma_init_coredev(struct ib_core_device *coredev, coredev->dev.class = &ib_class; coredev->dev.groups = dev->groups; + + /* + * Don't expose hw counters outside of the init namespace.
+ */ + if (!is_full_dev && dev->hw_stats_attr_index) + coredev->dev.groups[dev->hw_stats_attr_index] = NULL; + device_initialize(&coredev->dev); coredev->owner = dev; INIT_LIST_HEAD(&coredev->port_list); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 58befbaaf0ad..242434c09e8d 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2671,11 +2671,11 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad) { unsigned long flags; - int post, ret; struct ib_mad_private *mad_priv; struct ib_sge sg_list; struct ib_recv_wr recv_wr; struct ib_mad_queue *recv_queue = &qp_info->recv_queue; + int ret = 0; /* Initialize common scatter list fields */ sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey; @@ -2685,7 +2685,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, recv_wr.sg_list = &sg_list; recv_wr.num_sge = 1; - do { + while (true) { /* Allocate and map receive buffer */ if (mad) { mad_priv = mad; @@ -2693,10 +2693,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, } else { mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv), GFP_ATOMIC); - if (!mad_priv) { - ret = -ENOMEM; - break; - } + if (!mad_priv) + return -ENOMEM; } sg_list.length = mad_priv_dma_size(mad_priv); sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, @@ -2705,37 +2703,41 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device, sg_list.addr))) { - kfree(mad_priv); ret = -ENOMEM; - break; + goto free_mad_priv; } mad_priv->header.mapping = sg_list.addr; mad_priv->header.mad_list.mad_queue = recv_queue; mad_priv->header.mad_list.cqe.done = ib_mad_recv_done; recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; - - /* Post receive WR */ spin_lock_irqsave(&recv_queue->lock, flags); - post = (++recv_queue->count < recv_queue->max_active); - list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list); + if (recv_queue->count >= recv_queue->max_active) { + /* Fully populated the receive queue */ + spin_unlock_irqrestore(&recv_queue->lock, flags); + break; + } + recv_queue->count++; + list_add_tail(&mad_priv->header.mad_list.list, + &recv_queue->list); spin_unlock_irqrestore(&recv_queue->lock, flags); + ret = ib_post_recv(qp_info->qp, &recv_wr, NULL); if (ret) { spin_lock_irqsave(&recv_queue->lock, flags); list_del(&mad_priv->header.mad_list.list); recv_queue->count--; spin_unlock_irqrestore(&recv_queue->lock, flags); - ib_dma_unmap_single(qp_info->port_priv->device, - mad_priv->header.mapping, - mad_priv_dma_size(mad_priv), - DMA_FROM_DEVICE); - kfree(mad_priv); dev_err(&qp_info->port_priv->device->dev, "ib_post_recv failed: %d\n", ret); break; } - } while (post); + } + ib_dma_unmap_single(qp_info->port_priv->device, + mad_priv->header.mapping, + mad_priv_dma_size(mad_priv), DMA_FROM_DEVICE); +free_mad_priv: + kfree(mad_priv); return ret; } diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index ec5efdc16660..a9c33a6220ea 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -984,6 +984,7 @@ int ib_setup_device_attrs(struct ib_device *ibdev) for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++) if (!ibdev->groups[i]) { ibdev->groups[i] = &data->group; + ibdev->hw_stats_attr_index = i; return 0; } WARN(true, "struct ib_device->groups is too small"); diff --git a/drivers/infiniband/core/umem.c 
b/drivers/infiniband/core/umem.c index 8ce569bf7525..1d154055a335 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -80,9 +80,12 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, unsigned long pgsz_bitmap, unsigned long virt) { - struct scatterlist *sg; + unsigned long curr_len = 0; + dma_addr_t curr_base = ~0; unsigned long va, pgoff; + struct scatterlist *sg; dma_addr_t mask; + dma_addr_t end; int i; umem->iova = va = virt; @@ -107,17 +110,30 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, pgoff = umem->address & ~PAGE_MASK; for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) { - /* Walk SGL and reduce max page size if VA/PA bits differ - * for any address. + /* If the current entry is physically contiguous with the previous + * one, no need to take its start addresses into consideration. */ - mask |= (sg_dma_address(sg) + pgoff) ^ va; + if (check_add_overflow(curr_base, curr_len, &end) || + end != sg_dma_address(sg)) { + + curr_base = sg_dma_address(sg); + curr_len = 0; + + /* Reduce max page size if VA/PA bits differ */ + mask |= (curr_base + pgoff) ^ va; + + /* The alignment of any VA matching a discontinuity point + * in the physical memory sets the maximum possible page + * size as this must be a starting point of a new page that + * needs to be aligned. + */ + if (i != 0) + mask |= va; + } + + curr_len += sg_dma_len(sg); va += sg_dma_len(sg) - pgoff; - /* Except for the last entry, the ending iova alignment sets - * the maximum possible page size as the low bits of the iova - * must be zero when starting the next chunk. - */ - if (i != (umem->sgt_append.sgt.nents - 1)) - mask |= va; + pgoff = 0; } diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index e9fa22d31c23..c48ef6083020 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -76,12 +76,14 @@ static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp, npfns = (end - start) >> PAGE_SHIFT; umem_odp->pfn_list = kvcalloc( - npfns, sizeof(*umem_odp->pfn_list), GFP_KERNEL); + npfns, sizeof(*umem_odp->pfn_list), + GFP_KERNEL | __GFP_NOWARN); if (!umem_odp->pfn_list) return -ENOMEM; umem_odp->dma_list = kvcalloc( - ndmas, sizeof(*umem_odp->dma_list), GFP_KERNEL); + ndmas, sizeof(*umem_odp->dma_list), + GFP_KERNEL | __GFP_NOWARN); if (!umem_odp->dma_list) { ret = -ENOMEM; goto out_pfn_list; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index e836c9c477f6..33e2fe0facd5 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter, { const void __user *res = iter->cur; - if (iter->cur + len > iter->end) + if (len > iter->end - iter->cur) return (void __force __user *)ERR_PTR(-ENOSPC); iter->cur += len; return res; @@ -718,8 +718,8 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs) goto err_free; pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); - if (!pd) { - ret = -EINVAL; + if (IS_ERR(pd)) { + ret = PTR_ERR(pd); goto err_free; } @@ -809,8 +809,8 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs) if (cmd.flags & IB_MR_REREG_PD) { new_pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); - if (!new_pd) { - ret = -EINVAL; + if (IS_ERR(new_pd)) { + ret = PTR_ERR(new_pd); goto put_uobjs; } } else { @@ -919,8 +919,8 @@ static int ib_uverbs_alloc_mw(struct 
uverbs_attr_bundle *attrs) return PTR_ERR(uobj); pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); - if (!pd) { - ret = -EINVAL; + if (IS_ERR(pd)) { + ret = PTR_ERR(pd); goto err_free; } @@ -1127,8 +1127,8 @@ static int ib_uverbs_resize_cq(struct uverbs_attr_bundle *attrs) return ret; cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); - if (!cq) - return -EINVAL; + if (IS_ERR(cq)) + return PTR_ERR(cq); ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata); if (ret) @@ -1189,8 +1189,8 @@ static int ib_uverbs_poll_cq(struct uverbs_attr_bundle *attrs) return ret; cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); - if (!cq) - return -EINVAL; + if (IS_ERR(cq)) + return PTR_ERR(cq); /* we copy a struct ib_uverbs_poll_cq_resp to user space */ header_ptr = attrs->ucore.outbuf; @@ -1238,8 +1238,8 @@ static int ib_uverbs_req_notify_cq(struct uverbs_attr_bundle *attrs) return ret; cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); - if (!cq) - return -EINVAL; + if (IS_ERR(cq)) + return PTR_ERR(cq); ib_req_notify_cq(cq, cmd.solicited_only ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP); @@ -1321,8 +1321,8 @@ static int create_qp(struct uverbs_attr_bundle *attrs, ind_tbl = uobj_get_obj_read(rwq_ind_table, UVERBS_OBJECT_RWQ_IND_TBL, cmd->rwq_ind_tbl_handle, attrs); - if (!ind_tbl) { - ret = -EINVAL; + if (IS_ERR(ind_tbl)) { + ret = PTR_ERR(ind_tbl); goto err_put; } @@ -1360,8 +1360,10 @@ static int create_qp(struct uverbs_attr_bundle *attrs, if (cmd->is_srq) { srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd->srq_handle, attrs); - if (!srq || srq->srq_type == IB_SRQT_XRC) { - ret = -EINVAL; + if (IS_ERR(srq) || + srq->srq_type == IB_SRQT_XRC) { + ret = IS_ERR(srq) ? PTR_ERR(srq) : + -EINVAL; goto err_put; } } @@ -1371,23 +1373,29 @@ static int create_qp(struct uverbs_attr_bundle *attrs, rcq = uobj_get_obj_read( cq, UVERBS_OBJECT_CQ, cmd->recv_cq_handle, attrs); - if (!rcq) { - ret = -EINVAL; + if (IS_ERR(rcq)) { + ret = PTR_ERR(rcq); goto err_put; } } } } - if (has_sq) + if (has_sq) { scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->send_cq_handle, attrs); + if (IS_ERR(scq)) { + ret = PTR_ERR(scq); + goto err_put; + } + } + if (!ind_tbl && cmd->qp_type != IB_QPT_XRC_INI) rcq = rcq ?: scq; pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs); - if (!pd || (!scq && has_sq)) { - ret = -EINVAL; + if (IS_ERR(pd)) { + ret = PTR_ERR(pd); goto err_put; } @@ -1482,18 +1490,18 @@ static int create_qp(struct uverbs_attr_bundle *attrs, err_put: if (!IS_ERR(xrcd_uobj)) uobj_put_read(xrcd_uobj); - if (pd) + if (!IS_ERR_OR_NULL(pd)) uobj_put_obj_read(pd); - if (scq) + if (!IS_ERR_OR_NULL(scq)) rdma_lookup_put_uobject(&scq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); - if (rcq && rcq != scq) + if (!IS_ERR_OR_NULL(rcq) && rcq != scq) rdma_lookup_put_uobject(&rcq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); - if (srq) + if (!IS_ERR_OR_NULL(srq)) rdma_lookup_put_uobject(&srq->uobject->uevent.uobject, UVERBS_LOOKUP_READ); - if (ind_tbl) + if (!IS_ERR_OR_NULL(ind_tbl)) uobj_put_obj_read(ind_tbl); uobj_alloc_abort(&obj->uevent.uobject, attrs); @@ -1655,8 +1663,8 @@ static int ib_uverbs_query_qp(struct uverbs_attr_bundle *attrs) } qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) { - ret = -EINVAL; + if (IS_ERR(qp)) { + ret = PTR_ERR(qp); goto out; } @@ -1761,8 +1769,8 @@ static int modify_qp(struct uverbs_attr_bundle *attrs, qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd->base.qp_handle, attrs); - if 
(!qp) { - ret = -EINVAL; + if (IS_ERR(qp)) { + ret = PTR_ERR(qp); goto out; } @@ -2009,11 +2017,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd)); if (ret) return ret; - wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count); + wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size, + cmd.wr_count)); if (IS_ERR(wqes)) return PTR_ERR(wqes); - sgls = uverbs_request_next_ptr( - &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge)); + sgls = uverbs_request_next_ptr(&iter, + size_mul(cmd.sge_count, + sizeof(struct ib_uverbs_sge))); if (IS_ERR(sgls)) return PTR_ERR(sgls); ret = uverbs_request_finish(&iter); @@ -2025,8 +2035,8 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) return -ENOMEM; qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) { - ret = -EINVAL; + if (IS_ERR(qp)) { + ret = PTR_ERR(qp); goto out; } @@ -2063,9 +2073,9 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs) ud->ah = uobj_get_obj_read(ah, UVERBS_OBJECT_AH, user_wr->wr.ud.ah, attrs); - if (!ud->ah) { + if (IS_ERR(ud->ah)) { + ret = PTR_ERR(ud->ah); kfree(ud); - ret = -EINVAL; goto out_put; } ud->remote_qpn = user_wr->wr.ud.remote_qpn; @@ -2199,11 +2209,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count, if (wqe_size < sizeof(struct ib_uverbs_recv_wr)) return ERR_PTR(-EINVAL); - wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count); + wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count)); if (IS_ERR(wqes)) return ERR_CAST(wqes); - sgls = uverbs_request_next_ptr( - iter, sge_count * sizeof(struct ib_uverbs_sge)); + sgls = uverbs_request_next_ptr(iter, size_mul(sge_count, + sizeof(struct ib_uverbs_sge))); if (IS_ERR(sgls)) return ERR_CAST(sgls); ret = uverbs_request_finish(iter); @@ -2302,8 +2312,8 @@ static int ib_uverbs_post_recv(struct uverbs_attr_bundle *attrs) return PTR_ERR(wr); qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) { - ret = -EINVAL; + if (IS_ERR(qp)) { + ret = PTR_ERR(qp); goto out; } @@ -2353,8 +2363,8 @@ static int ib_uverbs_post_srq_recv(struct uverbs_attr_bundle *attrs) return PTR_ERR(wr); srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); - if (!srq) { - ret = -EINVAL; + if (IS_ERR(srq)) { + ret = PTR_ERR(srq); goto out; } @@ -2410,8 +2420,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs) } pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); - if (!pd) { - ret = -EINVAL; + if (IS_ERR(pd)) { + ret = PTR_ERR(pd); goto err; } @@ -2480,8 +2490,8 @@ static int ib_uverbs_attach_mcast(struct uverbs_attr_bundle *attrs) return ret; qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) - return -EINVAL; + if (IS_ERR(qp)) + return PTR_ERR(qp); obj = qp->uobject; @@ -2530,8 +2540,8 @@ static int ib_uverbs_detach_mcast(struct uverbs_attr_bundle *attrs) return ret; qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) - return -EINVAL; + if (IS_ERR(qp)) + return PTR_ERR(qp); obj = qp->uobject; mutex_lock(&obj->mcast_lock); @@ -2665,8 +2675,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs, UVERBS_OBJECT_FLOW_ACTION, kern_spec->action.handle, attrs); - if (!ib_spec->action.act) - return -EINVAL; + if (IS_ERR(ib_spec->action.act)) + return PTR_ERR(ib_spec->action.act); ib_spec->action.size = sizeof(struct ib_flow_spec_action_handle); 
flow_resources_add(uflow_res, @@ -2683,8 +2693,8 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs, UVERBS_OBJECT_COUNTERS, kern_spec->flow_count.handle, attrs); - if (!ib_spec->flow_count.counters) - return -EINVAL; + if (IS_ERR(ib_spec->flow_count.counters)) + return PTR_ERR(ib_spec->flow_count.counters); ib_spec->flow_count.size = sizeof(struct ib_flow_spec_action_count); flow_resources_add(uflow_res, @@ -2902,14 +2912,14 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs) return PTR_ERR(obj); pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd.pd_handle, attrs); - if (!pd) { - err = -EINVAL; + if (IS_ERR(pd)) { + err = PTR_ERR(pd); goto err_uobj; } cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); - if (!cq) { - err = -EINVAL; + if (IS_ERR(cq)) { + err = PTR_ERR(cq); goto err_put_pd; } @@ -3010,8 +3020,8 @@ static int ib_uverbs_ex_modify_wq(struct uverbs_attr_bundle *attrs) return -EINVAL; wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs); - if (!wq) - return -EINVAL; + if (IS_ERR(wq)) + return PTR_ERR(wq); if (cmd.attr_mask & IB_WQ_FLAGS) { wq_attr.flags = cmd.flags; @@ -3094,8 +3104,8 @@ static int ib_uverbs_ex_create_rwq_ind_table(struct uverbs_attr_bundle *attrs) num_read_wqs++) { wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, wqs_handles[num_read_wqs], attrs); - if (!wq) { - err = -EINVAL; + if (IS_ERR(wq)) { + err = PTR_ERR(wq); goto put_wqs; } @@ -3250,8 +3260,8 @@ static int ib_uverbs_ex_create_flow(struct uverbs_attr_bundle *attrs) } qp = uobj_get_obj_read(qp, UVERBS_OBJECT_QP, cmd.qp_handle, attrs); - if (!qp) { - err = -EINVAL; + if (IS_ERR(qp)) { + err = PTR_ERR(qp); goto err_uobj; } @@ -3397,15 +3407,15 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs, if (ib_srq_has_cq(cmd->srq_type)) { attr.ext.cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd->cq_handle, attrs); - if (!attr.ext.cq) { - ret = -EINVAL; + if (IS_ERR(attr.ext.cq)) { + ret = PTR_ERR(attr.ext.cq); goto err_put_xrcd; } } pd = uobj_get_obj_read(pd, UVERBS_OBJECT_PD, cmd->pd_handle, attrs); - if (!pd) { - ret = -EINVAL; + if (IS_ERR(pd)) { + ret = PTR_ERR(pd); goto err_put_cq; } @@ -3512,8 +3522,8 @@ static int ib_uverbs_modify_srq(struct uverbs_attr_bundle *attrs) return ret; srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); - if (!srq) - return -EINVAL; + if (IS_ERR(srq)) + return PTR_ERR(srq); attr.max_wr = cmd.max_wr; attr.srq_limit = cmd.srq_limit; @@ -3540,8 +3550,8 @@ static int ib_uverbs_query_srq(struct uverbs_attr_bundle *attrs) return ret; srq = uobj_get_obj_read(srq, UVERBS_OBJECT_SRQ, cmd.srq_handle, attrs); - if (!srq) - return -EINVAL; + if (IS_ERR(srq)) + return PTR_ERR(srq); ret = ib_query_srq(srq, &attr); @@ -3666,8 +3676,8 @@ static int ib_uverbs_ex_modify_cq(struct uverbs_attr_bundle *attrs) return -EOPNOTSUPP; cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cmd.cq_handle, attrs); - if (!cq) - return -EINVAL; + if (IS_ERR(cq)) + return PTR_ERR(cq); ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index b99b3cc283b6..97a116960f31 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -2959,22 +2959,23 @@ EXPORT_SYMBOL(__rdma_block_iter_start); bool __rdma_block_iter_next(struct ib_block_iter *biter) { unsigned int block_offset; - unsigned int sg_delta; + unsigned int delta; if (!biter->__sg_nents || !biter->__sg) return false; biter->__dma_addr = 
sg_dma_address(biter->__sg) + biter->__sg_advance; block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); - sg_delta = BIT_ULL(biter->__pg_bit) - block_offset; + delta = BIT_ULL(biter->__pg_bit) - block_offset; - if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { - biter->__sg_advance += sg_delta; - } else { + while (biter->__sg_nents && biter->__sg && + sg_dma_len(biter->__sg) - biter->__sg_advance <= delta) { + delta -= sg_dma_len(biter->__sg) - biter->__sg_advance; biter->__sg_advance = 0; biter->__sg = sg_next(biter->__sg); biter->__sg_nents--; } + biter->__sg_advance += delta; return true; } diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 4c34cb1cb786..94c34ba103ea 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -138,7 +138,7 @@ int bnxt_re_query_device(struct ib_device *ibdev, ib_attr->vendor_id = rdev->en_dev->pdev->vendor; ib_attr->vendor_part_id = rdev->en_dev->pdev->device; - ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device; + ib_attr->hw_ver = rdev->en_dev->pdev->revision; ib_attr->max_qp = dev_attr->max_qp; ib_attr->max_qp_wr = dev_attr->max_qp_wqes; ib_attr->device_cap_flags = @@ -1940,18 +1940,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, } } - if (qp_attr_mask & IB_QP_PATH_MTU) { - qp->qplib_qp.modify_flags |= - CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; - qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); - qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu); - } else if (qp_attr->qp_state == IB_QPS_RTR) { - qp->qplib_qp.modify_flags |= - CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; - qp->qplib_qp.path_mtu = - __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); - qp->qplib_qp.mtu = - ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); + if (qp_attr->qp_state == IB_QPS_RTR) { + enum ib_mtu qpmtu; + + qpmtu = iboe_get_mtu(rdev->netdev->mtu); + if (qp_attr_mask & IB_QP_PATH_MTU) { + if (ib_mtu_enum_to_int(qp_attr->path_mtu) > + ib_mtu_enum_to_int(qpmtu)) + return -EINVAL; + qpmtu = qp_attr->path_mtu; + } + + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; + qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu); + qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu); } if (qp_attr_mask & IB_QP_TIMEOUT) { @@ -3340,7 +3342,7 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp, wc->byte_len = orig_cqe->length; wc->qp = &gsi_qp->ib_qp; - wc->ex.imm_data = cpu_to_be32(le32_to_cpu(orig_cqe->immdata)); + wc->ex.imm_data = cpu_to_be32(orig_cqe->immdata); wc->src_qp = orig_cqe->src_qp; memcpy(wc->smac, orig_cqe->smac, ETH_ALEN); if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) { @@ -3476,7 +3478,10 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) (unsigned long)(cqe->qp_handle), struct bnxt_re_qp, qplib_qp); wc->qp = &qp->ib_qp; - wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immdata)); + if (cqe->flags & CQ_RES_RC_FLAGS_IMM) + wc->ex.imm_data = cpu_to_be32(cqe->immdata); + else + wc->ex.invalidate_rkey = cqe->invrkey; wc->src_qp = cqe->src_qp; memcpy(wc->smac, cqe->smac, ETH_ALEN); wc->port_num = 1; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index 7bd7ac8d52e6..15a62d0d243c 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -1149,9 +1149,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) rq->dbinfo.db = qp->dpi->dbr; 
rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size); } + spin_lock_bh(&rcfw->tbl_lock); tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw); rcfw->qp_tbl[tbl_indx].qp_id = qp->id; rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp; + spin_unlock_bh(&rcfw->tbl_lock); return 0; fail: @@ -1184,8 +1186,6 @@ static void __modify_flags_from_init_state(struct bnxt_qplib_qp *qp) qp->path_mtu = CMDQ_MODIFY_QP_PATH_MTU_MTU_2048; } - qp->modify_flags &= - ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID; /* Bono FW require the max_dest_rd_atomic to be >= 1 */ if (qp->max_dest_rd_atomic < 1) qp->max_dest_rd_atomic = 1; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 57a3dae87f65..13263ce2309d 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -374,7 +374,7 @@ struct bnxt_qplib_cqe { u16 cfa_meta; u64 wr_id; union { - __le32 immdata; + u32 immdata; u32 invrkey; }; u64 qp_handle; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 9c28f4625c92..9d4f744b001a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -220,9 +220,10 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_ctx *ctx, int is_virtfn); void bnxt_qplib_mark_qp_error(void *qp_handle); + static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw) { /* Last index of the qp_tbl is for QP1 ie. qp_tbl_size - 1*/ - return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2; + return (qid == 1) ? rcfw->qp_tbl_size - 1 : (qid % (rcfw->qp_tbl_size - 2)); } #endif /* __BNXT_QPLIB_RCFW_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index bae7d8926143..f59e8755f611 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -124,7 +124,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_qp_init_rd_atom = sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom; - attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr); + attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1; /* * 128 WQEs needs to be reserved for the HW (8916). Prevent * reporting the max number diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 80970a1738f8..034b85c42255 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -1114,8 +1114,10 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl, * The math here assumes sizeof cpl_pass_accept_req >= sizeof * cpl_rx_pkt. 
*/ - skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) + - sizeof(struct rss_header) - pktshift, GFP_ATOMIC); + skb = alloc_skb(size_add(gl->tot_len, + sizeof(struct cpl_pass_accept_req) + + sizeof(struct rss_header)) - pktshift, + GFP_ATOMIC); if (unlikely(!skb)) return NULL; diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c index 15ee92081118..924940ca9de0 100644 --- a/drivers/infiniband/hw/efa/efa_main.c +++ b/drivers/infiniband/hw/efa/efa_main.c @@ -452,7 +452,6 @@ static void efa_ib_device_remove(struct efa_dev *dev) ibdev_info(&dev->ibdev, "Unregister ib device\n"); ib_unregister_device(&dev->ibdev); efa_destroy_eqs(dev); - efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL); efa_release_doorbell_bar(dev); } @@ -623,12 +622,14 @@ err_disable_device: return ERR_PTR(err); } -static void efa_remove_device(struct pci_dev *pdev) +static void efa_remove_device(struct pci_dev *pdev, + enum efa_regs_reset_reason_types reset_reason) { struct efa_dev *dev = pci_get_drvdata(pdev); struct efa_com_dev *edev; edev = &dev->edev; + efa_com_dev_reset(edev, reset_reason); efa_com_admin_destroy(edev); efa_free_irq(dev, &dev->admin_irq); efa_disable_msix(dev); @@ -656,7 +657,7 @@ static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_remove_device: - efa_remove_device(pdev); + efa_remove_device(pdev, EFA_REGS_RESET_INIT_ERR); return err; } @@ -665,7 +666,7 @@ static void efa_remove(struct pci_dev *pdev) struct efa_dev *dev = pci_get_drvdata(pdev); efa_ib_device_remove(dev); - efa_remove_device(pdev); + efa_remove_device(pdev, EFA_REGS_RESET_NORMAL); } static struct pci_driver efa_pci_driver = { diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c index 74f6348f240a..1156ae62c468 100644 --- a/drivers/infiniband/hw/erdma/erdma_cm.c +++ b/drivers/infiniband/hw/erdma/erdma_cm.c @@ -704,7 +704,6 @@ error: erdma_cancel_mpatimer(new_cep); erdma_cep_put(new_cep); - new_cep->sock = NULL; } if (new_s) { diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c index 11a78ceae568..950c133d4220 100644 --- a/drivers/infiniband/hw/hns/hns_roce_alloc.c +++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c @@ -153,8 +153,7 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, return total; } -int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, - int buf_cnt, struct ib_umem *umem, +int hns_roce_get_umem_bufs(dma_addr_t *bufs, int buf_cnt, struct ib_umem *umem, unsigned int page_shift) { struct ib_block_iter biter; diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index ff177466de9b..5e0d78f4e545 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -133,14 +133,12 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct ib_device *ibdev = &hr_dev->ib_dev; u64 mtts[MTT_MIN_COUNT] = {}; - dma_addr_t dma_handle; int ret; - ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts), - &dma_handle); - if (!ret) { + ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts)); + if (ret) { ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret); - return -EINVAL; + return ret; } /* Get CQC memory HEM(Hardware Entry Memory) table */ @@ -157,7 +155,8 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct 
hns_roce_cq *hr_cq) goto err_put; } - ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle); + ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, + hns_roce_get_mtr_ba(&hr_cq->mtr)); if (ret) goto err_xa; @@ -180,8 +179,8 @@ static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC, hr_cq->cqn); if (ret) - dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret, - hr_cq->cqn); + dev_err_ratelimited(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", + ret, hr_cq->cqn); xa_erase_irq(&cq_table->array, hr_cq->cqn); diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index a2bdfa026c56..060797001353 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -882,8 +882,7 @@ struct hns_roce_hw { int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, int flags, void *mb_buf); - int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf, - struct hns_roce_mr *mr); + int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr); int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw); void (*write_cqc)(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, @@ -1117,8 +1116,13 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev); /* hns roce hw need current block and next block addr from mtt */ #define MTT_MIN_COUNT 2 +static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr) +{ + return mtr->hem_cfg.root_ba; +} + int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); + u32 offset, u64 *mtt_buf, int mtt_max); int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, struct hns_roce_buf_attr *buf_attr, unsigned int page_shift, struct ib_udata *udata, @@ -1177,7 +1181,7 @@ struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, int buf_cnt, struct hns_roce_buf *buf, unsigned int page_shift); -int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs, +int hns_roce_get_umem_bufs(dma_addr_t *bufs, int buf_cnt, struct ib_umem *umem, unsigned int page_shift); @@ -1228,6 +1232,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn); void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type); void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp); void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type); +void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn); void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type); u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index); void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev); diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index 2c8f0fd9557d..173ab794fa78 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -337,7 +337,7 @@ static int calc_hem_config(struct hns_roce_dev *hr_dev, struct hns_roce_hem_mhop *mhop, struct hns_roce_hem_index *index) { - struct ib_device *ibdev = &hr_dev->ib_dev; + struct device *dev = hr_dev->dev; unsigned long mhop_obj = obj; u32 l0_idx, l1_idx, l2_idx; u32 chunk_ba_num; @@ -368,14 +368,14 @@ static int calc_hem_config(struct hns_roce_dev *hr_dev, 
index->buf = l0_idx; break; default: - ibdev_err(ibdev, "table %u not support mhop.hop_num = %u!\n", - table->type, mhop->hop_num); + dev_err(dev, "table %u not support mhop.hop_num = %u!\n", + table->type, mhop->hop_num); return -EINVAL; } if (unlikely(index->buf >= table->num_hem)) { - ibdev_err(ibdev, "table %u exceed hem limt idx %llu, max %lu!\n", - table->type, index->buf, table->num_hem); + dev_err(dev, "table %u exceed hem limt idx %llu, max %lu!\n", + table->type, index->buf, table->num_hem); return -EINVAL; } @@ -487,14 +487,14 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_mhop *mhop, struct hns_roce_hem_index *index) { - struct ib_device *ibdev = &hr_dev->ib_dev; + struct device *dev = hr_dev->dev; u32 step_idx; int ret = 0; if (index->inited & HEM_INDEX_L0) { ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0); if (ret) { - ibdev_err(ibdev, "set HEM step 0 failed!\n"); + dev_err(dev, "set HEM step 0 failed!\n"); goto out; } } @@ -502,7 +502,7 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev, if (index->inited & HEM_INDEX_L1) { ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1); if (ret) { - ibdev_err(ibdev, "set HEM step 1 failed!\n"); + dev_err(dev, "set HEM step 1 failed!\n"); goto out; } } @@ -514,7 +514,7 @@ static int set_mhop_hem(struct hns_roce_dev *hr_dev, step_idx = mhop->hop_num; ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx); if (ret) - ibdev_err(ibdev, "set HEM step last failed!\n"); + dev_err(dev, "set HEM step last failed!\n"); } out: return ret; @@ -524,14 +524,14 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, unsigned long obj) { - struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_hem_index index = {}; struct hns_roce_hem_mhop mhop = {}; + struct device *dev = hr_dev->dev; int ret; ret = calc_hem_config(hr_dev, table, obj, &mhop, &index); if (ret) { - ibdev_err(ibdev, "calc hem config failed!\n"); + dev_err(dev, "calc hem config failed!\n"); return ret; } @@ -543,7 +543,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, ret = alloc_mhop_hem(hr_dev, table, &mhop, &index); if (ret) { - ibdev_err(ibdev, "alloc mhop hem failed!\n"); + dev_err(dev, "alloc mhop hem failed!\n"); goto out; } @@ -551,7 +551,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, if (table->type < HEM_TYPE_MTT) { ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index); if (ret) { - ibdev_err(ibdev, "set HEM address to HW failed!\n"); + dev_err(dev, "set HEM address to HW failed!\n"); goto err_alloc; } } @@ -615,10 +615,11 @@ static void clear_mhop_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_mhop *mhop, struct hns_roce_hem_index *index) { - struct ib_device *ibdev = &hr_dev->ib_dev; + struct device *dev = hr_dev->dev; u32 hop_num = mhop->hop_num; u32 chunk_ba_num; u32 step_idx; + int ret; index->inited = HEM_INDEX_BUF; chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN; @@ -642,16 +643,24 @@ static void clear_mhop_hem(struct hns_roce_dev *hr_dev, else step_idx = hop_num; - if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx)) - ibdev_warn(ibdev, "failed to clear hop%u HEM.\n", hop_num); + ret = hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx); + if (ret) + dev_warn(dev, "failed to clear hop%u HEM, ret = %d.\n", + hop_num, ret); - if (index->inited & HEM_INDEX_L1) - if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1)) - ibdev_warn(ibdev, "failed to clear HEM step 1.\n"); + if (index->inited & HEM_INDEX_L1) { + ret = hr_dev->hw->clear_hem(hr_dev, 
table, obj, 1); + if (ret) + dev_warn(dev, "failed to clear HEM step 1, ret = %d.\n", + ret); + } - if (index->inited & HEM_INDEX_L0) - if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0)) - ibdev_warn(ibdev, "failed to clear HEM step 0.\n"); + if (index->inited & HEM_INDEX_L0) { + ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0); + if (ret) + dev_warn(dev, "failed to clear HEM step 0, ret = %d.\n", + ret); + } } } @@ -660,14 +669,14 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, unsigned long obj, int check_refcount) { - struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_hem_index index = {}; struct hns_roce_hem_mhop mhop = {}; + struct device *dev = hr_dev->dev; int ret; ret = calc_hem_config(hr_dev, table, obj, &mhop, &index); if (ret) { - ibdev_err(ibdev, "calc hem config failed!\n"); + dev_err(dev, "calc hem config failed!\n"); return; } @@ -688,6 +697,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev, { struct device *dev = hr_dev->dev; unsigned long i; + int ret; if (hns_roce_check_whether_mhop(hr_dev, table->type)) { hns_roce_table_mhop_put(hr_dev, table, obj, 1); @@ -700,8 +710,10 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev, &table->mutex)) return; - if (hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT)) - dev_warn(dev, "failed to clear HEM base address.\n"); + ret = hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT); + if (ret) + dev_warn_ratelimited(dev, "failed to clear HEM base address, ret = %d.\n", + ret); hns_roce_free_hem(hr_dev, table->hem[i]); table->hem[i] = NULL; @@ -917,6 +929,8 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, { struct device *dev = hr_dev->dev; unsigned long i; + int obj; + int ret; if (hns_roce_check_whether_mhop(hr_dev, table->type)) { hns_roce_cleanup_mhop_hem_table(hr_dev, table); @@ -925,9 +939,11 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, for (i = 0; i < table->num_hem; ++i) if (table->hem[i]) { - if (hr_dev->hw->clear_hem(hr_dev, table, - i * table->table_chunk_size / table->obj_size, 0)) - dev_err(dev, "clear HEM base address failed.\n"); + obj = i * table->table_chunk_size / table->obj_size; + ret = hr_dev->hw->clear_hem(hr_dev, table, obj, 0); + if (ret) + dev_err(dev, "clear HEM base address failed, ret = %d.\n", + ret); hns_roce_free_hem(hr_dev, table->hem[i]); } @@ -970,6 +986,7 @@ struct hns_roce_hem_item { size_t count; /* max ba numbers */ int start; /* start buf offset in this hem */ int end; /* end buf offset in this hem */ + bool exist_bt; }; /* All HEM items are linked in a tree structure */ @@ -998,6 +1015,7 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count, } } + hem->exist_bt = exist_bt; hem->count = count; hem->start = start; hem->end = end; @@ -1008,34 +1026,32 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count, } static void hem_list_free_item(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_item *hem, bool exist_bt) + struct hns_roce_hem_item *hem) { - if (exist_bt) + if (hem->exist_bt) dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN, hem->addr, hem->dma_addr); kfree(hem); } static void hem_list_free_all(struct hns_roce_dev *hr_dev, - struct list_head *head, bool exist_bt) + struct list_head *head) { struct hns_roce_hem_item *hem, *temp_hem; list_for_each_entry_safe(hem, temp_hem, head, list) { list_del(&hem->list); - hem_list_free_item(hr_dev, hem, exist_bt); + hem_list_free_item(hr_dev, hem); } } -static void hem_list_link_bt(struct hns_roce_dev 
*hr_dev, void *base_addr, - u64 table_addr) +static void hem_list_link_bt(void *base_addr, u64 table_addr) { *(u64 *)(base_addr) = table_addr; } /* assign L0 table address to hem from root bt */ -static void hem_list_assign_bt(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_item *hem, void *cpu_addr, +static void hem_list_assign_bt(struct hns_roce_hem_item *hem, void *cpu_addr, u64 phy_addr) { hem->addr = cpu_addr; @@ -1125,6 +1141,10 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions, for (i = 0; i < region_cnt; i++) { r = (struct hns_roce_buf_region *)®ions[i]; + /* when r->hopnum = 0, the region should not occupy root_ba. */ + if (!r->hopnum) + continue; + if (r->hopnum > 1) { step = hem_list_calc_ba_range(r->hopnum, 1, unit); if (step > 0) @@ -1206,8 +1226,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, if (level > 1) { pre = hem_ptrs[level - 1]; step = (cur->start - pre->start) / step * BA_BYTE_LEN; - hem_list_link_bt(hr_dev, pre->addr + step, - cur->dma_addr); + hem_list_link_bt(pre->addr + step, cur->dma_addr); } } @@ -1219,7 +1238,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev, err_exit: for (level = 1; level < hopnum; level++) - hem_list_free_all(hr_dev, &temp_list[level], true); + hem_list_free_all(hr_dev, &temp_list[level]); return ret; } @@ -1260,16 +1279,26 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base, { struct hns_roce_hem_item *hem; + /* This is on the has_mtt branch, if r->hopnum + * is 0, there is no root_ba to reuse for the + * region's fake hem, so a dma_alloc request is + * necessary here. + */ hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1, - r->count, false); + r->count, !r->hopnum); if (!hem) return -ENOMEM; - hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base); + /* The root_ba can be reused only when r->hopnum > 0. */ + if (r->hopnum) + hem_list_assign_bt(hem, cpu_base, phy_base); list_add(&hem->list, branch_head); list_add(&hem->sibling, leaf_head); - return r->count; + /* If r->hopnum == 0, 0 is returned, + * so that the root_bt entry is not occupied. + */ + return r->hopnum ? 
r->count : 0; } static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base, @@ -1288,7 +1317,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base, /* if exist mid bt, link L1 to L0 */ list_for_each_entry_safe(hem, temp_hem, branch_head, list) { offset = (hem->start - r->offset) / step * BA_BYTE_LEN; - hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr); + hem_list_link_bt(cpu_base + offset, hem->dma_addr); total++; } @@ -1313,7 +1342,7 @@ setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list, return -ENOMEM; total = 0; - for (i = 0; i < region_cnt && total < max_ba_num; i++) { + for (i = 0; i < region_cnt && total <= max_ba_num; i++) { r = ®ions[i]; if (!r->count) continue; @@ -1379,14 +1408,19 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev, region_cnt); if (ret) { for (i = 0; i < region_cnt; i++) - hem_list_free_all(hr_dev, &head.branch[i], false); + hem_list_free_all(hr_dev, &head.branch[i]); - hem_list_free_all(hr_dev, &head.root, true); + hem_list_free_all(hr_dev, &head.root); } return ret; } +/* This is the bottom bt pages number of a 100G MR on 4K OS, assuming + * the bt page size is not expanded by cal_best_bt_pg_sz() + */ +#define RESCHED_LOOP_CNT_THRESHOLD_ON_4K 12800 + /* construct the base address table and link them by address hop config */ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list, @@ -1395,6 +1429,7 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, { const struct hns_roce_buf_region *r; int ofs, end; + int loop; int unit; int ret; int i; @@ -1412,7 +1447,10 @@ int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev, continue; end = r->offset + r->count; - for (ofs = r->offset; ofs < end; ofs += unit) { + for (ofs = r->offset, loop = 1; ofs < end; ofs += unit, loop++) { + if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K)) + cond_resched(); + ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs, hem_list->mid_bt[i], &hem_list->btm_bt); @@ -1444,10 +1482,9 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev, for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++) for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++) - hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j], - j != 0); + hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]); - hem_list_free_all(hr_dev, &hem_list->root_bt, true); + hem_list_free_all(hr_dev, &hem_list->root_bt); INIT_LIST_HEAD(&hem_list->btm_bt); hem_list->root_ba = 0; } @@ -1470,9 +1507,14 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev, struct list_head *head = &hem_list->btm_bt; struct hns_roce_hem_item *hem, *temp_hem; void *cpu_base = NULL; + int loop = 1; int nr = 0; list_for_each_entry_safe(hem, temp_hem, head, sibling) { + if (!(loop % RESCHED_LOOP_CNT_THRESHOLD_ON_4K)) + cond_resched(); + loop++; + if (hem_list_page_is_in_range(hem, offset)) { nr = offset - hem->start; cpu_base = hem->addr + nr * BA_BYTE_LEN; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 56c0e87c494e..ab0dca9d199a 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -372,26 +372,12 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, static int check_send_valid(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { - struct ib_device *ibdev = &hr_dev->ib_dev; - struct ib_qp *ibqp = &hr_qp->ibqp; - - if (unlikely(ibqp->qp_type != IB_QPT_RC && - ibqp->qp_type != IB_QPT_GSI && - ibqp->qp_type 
!= IB_QPT_UD)) { - ibdev_err(ibdev, "not supported QP(0x%x)type!\n", - ibqp->qp_type); - return -EOPNOTSUPP; - } else if (unlikely(hr_qp->state == IB_QPS_RESET || - hr_qp->state == IB_QPS_INIT || - hr_qp->state == IB_QPS_RTR)) { - ibdev_err(ibdev, "failed to post WQE, QP state %u!\n", - hr_qp->state); + if (unlikely(hr_qp->state == IB_QPS_RESET || + hr_qp->state == IB_QPS_INIT || + hr_qp->state == IB_QPS_RTR)) return -EINVAL; - } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) { - ibdev_err(ibdev, "failed to post WQE, dev state %d!\n", - hr_dev->state); + else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) return -EIO; - } return 0; } @@ -485,7 +471,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp, valid_num_sge = calc_wr_sge_num(wr, &msg_len); ret = set_ud_opcode(ud_sq_wqe, wr); - if (WARN_ON(ret)) + if (WARN_ON_ONCE(ret)) return ret; ud_sq_wqe->msg_len = cpu_to_le32(msg_len); @@ -589,10 +575,10 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp, rc_sq_wqe->msg_len = cpu_to_le32(msg_len); ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr); - if (WARN_ON(ret)) + if (WARN_ON_ONCE(ret)) return ret; - hr_reg_write(rc_sq_wqe, RC_SEND_WQE_FENCE, + hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO, (wr->send_flags & IB_SEND_FENCE) ? 1 : 0); hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SE, @@ -687,6 +673,10 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, #define HNS_ROCE_SL_SHIFT 2 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe; + if (unlikely(qp->state == IB_QPS_ERR)) { + flush_cqe(hr_dev, qp); + return; + } /* All kinds of DirectWQE have the same header field layout */ hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG); hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl); @@ -777,17 +767,6 @@ out: static int check_recv_valid(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) { - struct ib_device *ibdev = &hr_dev->ib_dev; - struct ib_qp *ibqp = &hr_qp->ibqp; - - if (unlikely(ibqp->qp_type != IB_QPT_RC && - ibqp->qp_type != IB_QPT_GSI && - ibqp->qp_type != IB_QPT_UD)) { - ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n", - ibqp->qp_type); - return -EOPNOTSUPP; - } - if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) return -EIO; @@ -2863,8 +2842,8 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev, ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT, IB_QPS_INIT); if (ret) { - ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n", - ret); + ibdev_err_ratelimited(ibdev, "failed to modify qp to init, ret = %d.\n", + ret); return ret; } @@ -3314,21 +3293,22 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev, u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 }; struct ib_device *ibdev = &hr_dev->ib_dev; dma_addr_t pbl_ba; - int i, count; + int ret; + int i; - count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, - min_t(int, ARRAY_SIZE(pages), mr->npages), - &pbl_ba); - if (count < 1) { - ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n", - count); - return -ENOBUFS; + ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, + min_t(int, ARRAY_SIZE(pages), mr->npages)); + if (ret) { + ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret); + return ret; } /* Aligned to the hardware address access unit */ - for (i = 0; i < count; i++) + for (i = 0; i < ARRAY_SIZE(pages); i++) pages[i] >>= 6; + pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr); + mpt_entry->pbl_size = cpu_to_le32(mr->npages); mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3); hr_reg_write(mpt_entry, MPT_PBL_BA_H, 
upper_32_bits(pbl_ba >> 3)); @@ -3424,21 +3404,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, return ret; } -static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev, - void *mb_buf, struct hns_roce_mr *mr) +static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr) { - struct ib_device *ibdev = &hr_dev->ib_dev; + dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr); struct hns_roce_v2_mpt_entry *mpt_entry; - dma_addr_t pbl_ba = 0; mpt_entry = mb_buf; memset(mpt_entry, 0, sizeof(*mpt_entry)); - if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) { - ibdev_err(ibdev, "failed to find frmr mtr.\n"); - return -ENOBUFS; - } - hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE); hr_reg_write(mpt_entry, MPT_PD, mr->pd); @@ -3510,8 +3483,8 @@ static int free_mr_post_send_lp_wqe(struct hns_roce_qp *hr_qp) ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr); if (ret) { - ibdev_err(ibdev, "failed to post wqe for free mr, ret = %d.\n", - ret); + ibdev_err_ratelimited(ibdev, "failed to post wqe for free mr, ret = %d.\n", + ret); return ret; } @@ -3550,9 +3523,9 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev) ret = free_mr_post_send_lp_wqe(hr_qp); if (ret) { - ibdev_err(ibdev, - "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n", - hr_qp->qpn, ret); + ibdev_err_ratelimited(ibdev, + "failed to send wqe (qp:0x%lx) for free mr, ret = %d.\n", + hr_qp->qpn, ret); break; } @@ -3563,16 +3536,16 @@ static void free_mr_send_cmd_to_hw(struct hns_roce_dev *hr_dev) while (cqe_cnt) { npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc); if (npolled < 0) { - ibdev_err(ibdev, - "failed to poll cqe for free mr, remain %d cqe.\n", - cqe_cnt); + ibdev_err_ratelimited(ibdev, + "failed to poll cqe for free mr, remain %d cqe.\n", + cqe_cnt); goto out; } if (time_after(jiffies, end)) { - ibdev_err(ibdev, - "failed to poll cqe for free mr and timeout, remain %d cqe.\n", - cqe_cnt); + ibdev_err_ratelimited(ibdev, + "failed to poll cqe for free mr and timeout, remain %d cqe.\n", + cqe_cnt); goto out; } cqe_cnt -= npolled; @@ -4397,8 +4370,7 @@ static void set_access_flags(struct hns_roce_qp *hr_qp, } static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp, - struct hns_roce_v2_qp_context *context, - struct hns_roce_v2_qp_context *qpc_mask) + struct hns_roce_v2_qp_context *context) { hr_reg_write(context, QPC_SGE_SHIFT, to_hr_hem_entries_shift(hr_qp->sge.sge_cnt, @@ -4420,7 +4392,6 @@ static inline int get_pdn(struct ib_pd *ib_pd) } static void modify_qp_reset_to_init(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *qpc_mask) { @@ -4439,7 +4410,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs)); - set_qpc_wqe_cnt(hr_qp, context, qpc_mask); + set_qpc_wqe_cnt(hr_qp, context); /* No VLAN need to set 0xFFF */ hr_reg_write(context, QPC_VLAN_ID, 0xfff); @@ -4484,7 +4455,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, } static void modify_qp_init_to_init(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *qpc_mask) { @@ -4521,17 +4491,20 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev, { u64 mtts[MTT_MIN_COUNT] = { 0 }; u64 wqe_sge_ba; - int count; + int ret; /* Search qp buf's mtts */ - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts, - MTT_MIN_COUNT, &wqe_sge_ba); 
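/*
 * A minimal, stand-alone sketch of the calling convention the surrounding
 * hunks converge on: hns_roce_mtr_find() now returns 0 or a negative errno
 * instead of a page count, and the root base address is read through the new
 * hns_roce_get_mtr_ba() accessor rather than an out-parameter. The names and
 * types below (mtr_cfg, mtr_find, mtr_base_addr, write_ctx) are simplified
 * stand-ins for illustration, not the driver's real symbols.
 */
#include <stdint.h>
#include <stddef.h>
#include <errno.h>

struct mtr_cfg {
	uint64_t root_ba;	/* root base address of the translation table */
	uint64_t pages[4];	/* toy page-address table */
	size_t page_cnt;
};

/* Accessor replaces the old out-parameter: callers that only need the
 * root BA no longer go through the page lookup at all. */
static inline uint64_t mtr_base_addr(const struct mtr_cfg *cfg)
{
	return cfg->root_ba;
}

/* Returns 0 on success or a negative errno, instead of a found-page count. */
static int mtr_find(const struct mtr_cfg *cfg, size_t offset,
		    uint64_t *buf, size_t max)
{
	size_t i;

	if (!buf || !max)
		return -EINVAL;
	if (offset >= cfg->page_cnt)
		return -ENOENT;

	for (i = 0; i < max && offset + i < cfg->page_cnt; i++)
		buf[i] = cfg->pages[offset + i];

	return 0;
}

/* Typical caller, mirroring config_qp_rq_buf(): check the status first,
 * then fetch the base address through the accessor. */
static int write_ctx(const struct mtr_cfg *cfg, uint64_t *out_pages,
		     size_t n, uint64_t *out_ba)
{
	int ret = mtr_find(cfg, 0, out_pages, n);

	if (ret)
		return ret;

	*out_ba = mtr_base_addr(cfg);
	return 0;
}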
- if (hr_qp->rq.wqe_cnt && count < 1) { + ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts, + MTT_MIN_COUNT); + if (hr_qp->rq.wqe_cnt && ret) { ibdev_err(&hr_dev->ib_dev, - "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn); - return -EINVAL; + "failed to find QP(0x%lx) RQ WQE buf, ret = %d.\n", + hr_qp->qpn, ret); + return ret; } + wqe_sge_ba = hns_roce_get_mtr_ba(&hr_qp->mtr); + context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3); qpc_mask->wqe_sge_ba = 0; @@ -4595,23 +4568,23 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev, struct ib_device *ibdev = &hr_dev->ib_dev; u64 sge_cur_blk = 0; u64 sq_cur_blk = 0; - int count; + int ret; /* search qp buf's mtts */ - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL); - if (count < 1) { - ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n", - hr_qp->qpn); - return -EINVAL; + ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.offset, + &sq_cur_blk, 1); + if (ret) { + ibdev_err(ibdev, "failed to find QP(0x%lx) SQ WQE buf, ret = %d.\n", + hr_qp->qpn, ret); + return ret; } if (hr_qp->sge.sge_cnt > 0) { - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, - hr_qp->sge.offset, - &sge_cur_blk, 1, NULL); - if (count < 1) { - ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n", - hr_qp->qpn); - return -EINVAL; + ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, + hr_qp->sge.offset, &sge_cur_blk, 1); + if (ret) { + ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf, ret = %d.\n", + hr_qp->qpn, ret); + return ret; } } @@ -4779,8 +4752,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, return 0; } -static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, int attr_mask, +static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, int attr_mask, struct hns_roce_v2_qp_context *context, struct hns_roce_v2_qp_context *qpc_mask) { @@ -5143,22 +5115,19 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); int ret = 0; - if (!check_qp_state(cur_state, new_state)) { - ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n"); + if (!check_qp_state(cur_state, new_state)) return -EINVAL; - } if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { memset(qpc_mask, 0, hr_dev->caps.qpc_sz); - modify_qp_reset_to_init(ibqp, attr, context, qpc_mask); + modify_qp_reset_to_init(ibqp, context, qpc_mask); } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { - modify_qp_init_to_init(ibqp, attr, context, qpc_mask); + modify_qp_init_to_init(ibqp, context, qpc_mask); } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context, qpc_mask); } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { - ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context, - qpc_mask); + ret = modify_qp_rtr_to_rts(ibqp, attr_mask, context, qpc_mask); } return ret; @@ -5408,7 +5377,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, /* SW pass context to HW */ ret = hns_roce_v2_qp_modify(hr_dev, context, qpc_mask, hr_qp); if (ret) { - ibdev_err(ibdev, "failed to modify QP, ret = %d.\n", ret); + ibdev_err_ratelimited(ibdev, "failed to modify QP, ret = %d.\n", ret); goto out; } @@ -5498,7 +5467,9 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context); if (ret) { - ibdev_err(ibdev, "failed to query QPC, ret = %d.\n", ret); + ibdev_err_ratelimited(ibdev, + "failed to query QPC, ret = %d.\n", 
+ ret); ret = -EINVAL; goto out; } @@ -5506,7 +5477,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, state = hr_reg_read(&context, QPC_QP_ST); tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state); if (tmp_qp_state == -1) { - ibdev_err(ibdev, "Illegal ib_qp_state\n"); + ibdev_err_ratelimited(ibdev, "Illegal ib_qp_state\n"); ret = -EINVAL; goto out; } @@ -5599,9 +5570,9 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); if (ret) - ibdev_err(ibdev, - "failed to modify QP to RST, ret = %d.\n", - ret); + ibdev_err_ratelimited(ibdev, + "failed to modify QP to RST, ret = %d.\n", + ret); } send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; @@ -5637,9 +5608,9 @@ int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); if (ret) - ibdev_err(&hr_dev->ib_dev, - "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n", - hr_qp->qpn, ret); + ibdev_err_ratelimited(&hr_dev->ib_dev, + "failed to destroy QP, QPN = 0x%06lx, ret = %d.\n", + hr_qp->qpn, ret); hns_roce_qp_destroy(hr_dev, hr_qp, udata); @@ -5714,18 +5685,20 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq, struct ib_device *ibdev = srq->ibsrq.device; struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); u64 mtts_idx[MTT_MIN_COUNT] = {}; - dma_addr_t dma_handle_idx = 0; + dma_addr_t dma_handle_idx; int ret; /* Get physical address of idx que buf */ ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx, - ARRAY_SIZE(mtts_idx), &dma_handle_idx); - if (ret < 1) { + ARRAY_SIZE(mtts_idx)); + if (ret) { ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n", ret); - return -ENOBUFS; + return ret; } + dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr); + hr_reg_write(ctx, SRQC_IDX_HOP_NUM, to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt)); @@ -5757,20 +5730,22 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf) struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); struct hns_roce_srq_context *ctx = mb_buf; u64 mtts_wqe[MTT_MIN_COUNT] = {}; - dma_addr_t dma_handle_wqe = 0; + dma_addr_t dma_handle_wqe; int ret; memset(ctx, 0, sizeof(*ctx)); /* Get the physical address of srq buf */ ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe, - ARRAY_SIZE(mtts_wqe), &dma_handle_wqe); - if (ret < 1) { + ARRAY_SIZE(mtts_wqe)); + if (ret) { ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n", ret); - return -ENOBUFS; + return ret; } + dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr); + hr_reg_write(ctx, SRQC_SRQ_ST, 1); hr_reg_write_bool(ctx, SRQC_SRQ_TYPE, srq->ibsrq.srq_type == IB_SRQT_XRC); @@ -5912,9 +5887,9 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn); hns_roce_free_cmd_mailbox(hr_dev, mailbox); if (ret) - ibdev_err(&hr_dev->ib_dev, - "failed to process cmd when modifying CQ, ret = %d.\n", - ret); + ibdev_err_ratelimited(&hr_dev->ib_dev, + "failed to process cmd when modifying CQ, ret = %d.\n", + ret); return ret; } @@ -5934,9 +5909,9 @@ static int hns_roce_v2_query_cqc(struct hns_roce_dev *hr_dev, u32 cqn, ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_CQC, cqn); if (ret) { - ibdev_err(&hr_dev->ib_dev, - "failed to process cmd when querying CQ, ret = %d.\n", - ret); + ibdev_err_ratelimited(&hr_dev->ib_dev, + "failed to process cmd when querying CQ, ret = 
%d.\n", + ret); goto err_mailbox; } @@ -5977,11 +5952,10 @@ err_mailbox: return ret; } -static void hns_roce_irq_work_handle(struct work_struct *work) +static void dump_aeqe_log(struct hns_roce_work *irq_work) { - struct hns_roce_work *irq_work = - container_of(work, struct hns_roce_work, work); - struct ib_device *ibdev = &irq_work->hr_dev->ib_dev; + struct hns_roce_dev *hr_dev = irq_work->hr_dev; + struct ib_device *ibdev = &hr_dev->ib_dev; switch (irq_work->event_type) { case HNS_ROCE_EVENT_TYPE_PATH_MIG: @@ -6025,6 +5999,8 @@ static void hns_roce_irq_work_handle(struct work_struct *work) case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: ibdev_warn(ibdev, "DB overflow.\n"); break; + case HNS_ROCE_EVENT_TYPE_MB: + break; case HNS_ROCE_EVENT_TYPE_FLR: ibdev_warn(ibdev, "function level reset.\n"); break; @@ -6035,8 +6011,46 @@ static void hns_roce_irq_work_handle(struct work_struct *work) ibdev_err(ibdev, "invalid xrceth error.\n"); break; default: + ibdev_info(ibdev, "Undefined event %d.\n", + irq_work->event_type); break; } +} + +static void hns_roce_irq_work_handle(struct work_struct *work) +{ + struct hns_roce_work *irq_work = + container_of(work, struct hns_roce_work, work); + struct hns_roce_dev *hr_dev = irq_work->hr_dev; + int event_type = irq_work->event_type; + u32 queue_num = irq_work->queue_num; + + switch (event_type) { + case HNS_ROCE_EVENT_TYPE_PATH_MIG: + case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: + case HNS_ROCE_EVENT_TYPE_COMM_EST: + case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: + case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: + case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: + case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: + case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: + case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION: + case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH: + hns_roce_qp_event(hr_dev, queue_num, event_type); + break; + case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: + case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: + hns_roce_srq_event(hr_dev, queue_num, event_type); + break; + case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: + case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: + hns_roce_cq_event(hr_dev, queue_num, event_type); + break; + default: + break; + } + + dump_aeqe_log(irq_work); kfree(irq_work); } @@ -6097,14 +6111,14 @@ static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq) static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) { - struct device *dev = hr_dev->dev; struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq); irqreturn_t aeqe_found = IRQ_NONE; + int num_aeqes = 0; int event_type; u32 queue_num; int sub_type; - while (aeqe) { + while (aeqe && num_aeqes < HNS_AEQ_POLLING_BUDGET) { /* Make sure we read AEQ entry after we have checked the * ownership bit */ @@ -6115,25 +6129,12 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, queue_num = hr_reg_read(aeqe, AEQE_EVENT_QUEUE_NUM); switch (event_type) { - case HNS_ROCE_EVENT_TYPE_PATH_MIG: - case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: - case HNS_ROCE_EVENT_TYPE_COMM_EST: - case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: case HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION: case HNS_ROCE_EVENT_TYPE_INVALID_XRCETH: - hns_roce_qp_event(hr_dev, queue_num, event_type); - break; - case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: - case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: - hns_roce_srq_event(hr_dev, queue_num, event_type); - break; - case 
HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - hns_roce_cq_event(hr_dev, queue_num, event_type); + hns_roce_flush_cqe(hr_dev, queue_num); break; case HNS_ROCE_EVENT_TYPE_MB: hns_roce_cmd_event(hr_dev, @@ -6141,12 +6142,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, aeqe->event.cmd.status, le64_to_cpu(aeqe->event.cmd.out_param)); break; - case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: - case HNS_ROCE_EVENT_TYPE_FLR: - break; default: - dev_err(dev, "unhandled event %d on EQ %d at idx %u.\n", - event_type, eq->eqn, eq->cons_index); break; } @@ -6158,6 +6154,7 @@ static irqreturn_t hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev, hns_roce_v2_init_irq_work(hr_dev, eq, queue_num); aeqe = next_aeqe_sw_v2(eq); + ++num_aeqes; } update_eq_db(eq); @@ -6469,7 +6466,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, u64 eqe_ba[MTT_MIN_COUNT] = { 0 }; struct hns_roce_eq_context *eqc; u64 bt_ba = 0; - int count; + int ret; eqc = mb_buf; memset(eqc, 0, sizeof(struct hns_roce_eq_context)); @@ -6477,13 +6474,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, init_eq_config(hr_dev, eq); /* if not multi-hop, eqe buffer only use one trunk */ - count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT, - &bt_ba); - if (count < 1) { - dev_err(hr_dev->dev, "failed to find EQE mtr\n"); - return -ENOBUFS; + ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, + ARRAY_SIZE(eqe_ba)); + if (ret) { + dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret); + return ret; } + bt_ba = hns_roce_get_mtr_ba(&eq->mtr); + hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID); hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num); hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore); @@ -6687,6 +6686,9 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev) int ret; int i; + if (hr_dev->caps.aeqe_depth < HNS_AEQ_POLLING_BUDGET) + return -EINVAL; + other_num = hr_dev->caps.num_other_vectors; comp_num = hr_dev->caps.num_comp_vectors; aeq_num = hr_dev->caps.num_aeq_vectors; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 2b4dbbb06eb5..a9eff72f10c6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -114,6 +114,11 @@ #define HNS_ROCE_V2_TABLE_CHUNK_SIZE (1 << 18) +/* budget must be smaller than aeqe_depth to guarantee that we update + * the ci before we polled all the entries in the EQ. 
+ */ +#define HNS_AEQ_POLLING_BUDGET 64 + enum { HNS_ROCE_CMD_FLAG_IN = BIT(0), HNS_ROCE_CMD_FLAG_OUT = BIT(1), @@ -921,6 +926,7 @@ struct hns_roce_v2_rc_send_wqe { #define RC_SEND_WQE_OWNER RC_SEND_WQE_FIELD_LOC(7, 7) #define RC_SEND_WQE_CQE RC_SEND_WQE_FIELD_LOC(8, 8) #define RC_SEND_WQE_FENCE RC_SEND_WQE_FIELD_LOC(9, 9) +#define RC_SEND_WQE_SO RC_SEND_WQE_FIELD_LOC(10, 10) #define RC_SEND_WQE_SE RC_SEND_WQE_FIELD_LOC(11, 11) #define RC_SEND_WQE_INLINE RC_SEND_WQE_FIELD_LOC(12, 12) #define RC_SEND_WQE_WQE_INDEX RC_SEND_WQE_FIELD_LOC(30, 15) diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index afe7523eca90..eae22ac42e05 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -182,7 +182,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev, IB_DEVICE_RC_RNR_NAK_GEN; props->max_send_sge = hr_dev->caps.max_sq_sg; props->max_recv_sge = hr_dev->caps.max_rq_sg; - props->max_sge_rd = 1; + props->max_sge_rd = hr_dev->caps.max_sq_sg; props->max_cq = hr_dev->caps.num_cqs; props->max_cqe = hr_dev->caps.max_cqes; props->max_mr = hr_dev->caps.num_mtpts; @@ -642,7 +642,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) if (ret) return ret; } - dma_set_max_seg_size(dev, UINT_MAX); + dma_set_max_seg_size(dev, SZ_2G); ret = ib_register_device(ib_dev, "hns_%d", dev); if (ret) { dev_err(dev, "ib_register_device failed!\n"); diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c index 980261969b0c..408ef2a96149 100644 --- a/drivers/infiniband/hw/hns/hns_roce_mr.c +++ b/drivers/infiniband/hw/hns/hns_roce_mr.c @@ -130,8 +130,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1)); if (ret) - ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n", - ret); + ibdev_warn_ratelimited(ibdev, "failed to destroy mpt, ret = %d.\n", + ret); } free_mr_pbl(hr_dev, mr); @@ -154,7 +154,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev, if (mr->type != MR_TYPE_FRMR) ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr); else - ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr); + ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr); if (ret) { dev_err(dev, "failed to write mtpt, ret = %d.\n", ret); goto err_page; @@ -415,15 +415,16 @@ static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr) } int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, - unsigned int *sg_offset) + unsigned int *sg_offset_p) { + unsigned int sg_offset = sg_offset_p ? 
*sg_offset_p : 0; struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device); struct ib_device *ibdev = &hr_dev->ib_dev; struct hns_roce_mr *mr = to_hr_mr(ibmr); struct hns_roce_mtr *mtr = &mr->pbl_mtr; int ret, sg_num = 0; - if (!IS_ALIGNED(*sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) || + if (!IS_ALIGNED(sg_offset, HNS_ROCE_FRMR_ALIGN_SIZE) || ibmr->page_size < HNS_HW_PAGE_SIZE || ibmr->page_size > HNS_HW_MAX_PAGE_SIZE) return sg_num; @@ -434,7 +435,7 @@ int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, if (!mr->page_list) return sg_num; - sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page); + sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset_p, hns_roce_set_page); if (sg_num < 1) { ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n", mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num); @@ -713,7 +714,7 @@ static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, return -ENOMEM; if (mtr->umem) - npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count, + npage = hns_roce_get_umem_bufs(pages, page_count, mtr->umem, page_shift); else npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count, @@ -766,11 +767,6 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count && mapped_cnt < page_cnt; i++) { r = &mtr->hem_cfg.region[i]; - /* if hopnum is 0, no need to map pages in this region */ - if (!r->hopnum) { - mapped_cnt += r->count; - continue; - } if (r->offset + r->count > page_cnt) { ret = -EINVAL; @@ -801,47 +797,53 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, return ret; } -int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr) +static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg, + u32 start_index, u64 *mtt_buf, + int mtt_cnt) { - struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; - int mtt_count, left; - u32 start_index; + int mtt_count; int total = 0; - __le64 *mtts; u32 npage; u64 addr; - if (!mtt_buf || mtt_max < 1) - goto done; + if (mtt_cnt > cfg->region_count) + return -EINVAL; - /* no mtt memory in direct mode, so just return the buffer address */ - if (cfg->is_direct) { - start_index = offset >> HNS_HW_PAGE_SHIFT; - for (mtt_count = 0; mtt_count < cfg->region_count && - total < mtt_max; mtt_count++) { - npage = cfg->region[mtt_count].offset; - if (npage < start_index) - continue; + for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt; + mtt_count++) { + npage = cfg->region[mtt_count].offset; + if (npage < start_index) + continue; - addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT); - mtt_buf[total] = addr; + addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT); + mtt_buf[total] = addr; - total++; - } - - goto done; + total++; } - start_index = offset >> cfg->buf_pg_shift; - left = mtt_max; + if (!total) + return -ENOENT; + + return 0; +} + +static int hns_roce_get_mhop_mtt(struct hns_roce_dev *hr_dev, + struct hns_roce_mtr *mtr, u32 start_index, + u64 *mtt_buf, int mtt_cnt) +{ + int left = mtt_cnt; + int total = 0; + int mtt_count; + __le64 *mtts; + u32 npage; + while (left > 0) { mtt_count = 0; mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list, start_index + total, &mtt_count); if (!mtts || !mtt_count) - goto done; + break; npage = min(mtt_count, left); left -= npage; @@ -849,11 +851,33 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, 
mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]); } -done: - if (base_addr) - *base_addr = cfg->root_ba; + if (!total) + return -ENOENT; - return total; + return 0; +} + +int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, + u32 offset, u64 *mtt_buf, int mtt_max) +{ + struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg; + u32 start_index; + int ret; + + if (!mtt_buf || mtt_max < 1) + return -EINVAL; + + /* no mtt memory in direct mode, so just return the buffer address */ + if (cfg->is_direct) { + start_index = offset >> HNS_HW_PAGE_SHIFT; + ret = hns_roce_get_direct_addr_mtt(cfg, start_index, + mtt_buf, mtt_max); + } else { + start_index = offset >> cfg->buf_pg_shift; + ret = hns_roce_get_mhop_mtt(hr_dev, mtr, start_index, + mtt_buf, mtt_max); + } + return ret; } static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev, diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index e158ed9e6c83..0f0351abe9b4 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -39,6 +39,25 @@ #include "hns_roce_device.h" #include "hns_roce_hem.h" +static struct hns_roce_qp *hns_roce_qp_lookup(struct hns_roce_dev *hr_dev, + u32 qpn) +{ + struct device *dev = hr_dev->dev; + struct hns_roce_qp *qp; + unsigned long flags; + + xa_lock_irqsave(&hr_dev->qp_table_xa, flags); + qp = __hns_roce_qp_lookup(hr_dev, qpn); + if (qp) + refcount_inc(&qp->refcount); + xa_unlock_irqrestore(&hr_dev->qp_table_xa, flags); + + if (!qp) + dev_warn(dev, "async event for bogus QP %08x\n", qpn); + + return qp; +} + static void flush_work_handle(struct work_struct *work) { struct hns_roce_work *flush_work = container_of(work, @@ -95,29 +114,11 @@ void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp) void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) { - struct device *dev = hr_dev->dev; struct hns_roce_qp *qp; - xa_lock(&hr_dev->qp_table_xa); - qp = __hns_roce_qp_lookup(hr_dev, qpn); - if (qp) - refcount_inc(&qp->refcount); - xa_unlock(&hr_dev->qp_table_xa); - - if (!qp) { - dev_warn(dev, "async event for bogus QP %08x\n", qpn); + qp = hns_roce_qp_lookup(hr_dev, qpn); + if (!qp) return; - } - - if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR || - event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION || - event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) { - qp->state = IB_QPS_ERR; - - flush_cqe(hr_dev, qp); - } qp->event(qp, (enum hns_roce_event)event_type); @@ -125,6 +126,21 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) complete(&qp->free); } +void hns_roce_flush_cqe(struct hns_roce_dev *hr_dev, u32 qpn) +{ + struct hns_roce_qp *qp; + + qp = hns_roce_qp_lookup(hr_dev, qpn); + if (!qp) + return; + + qp->state = IB_QPS_ERR; + flush_cqe(hr_dev, qp); + + if (refcount_dec_and_test(&qp->refcount)) + complete(&qp->free); +} + static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp, enum hns_roce_event type) { @@ -908,12 +924,14 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_ib_create_qp *ucmd, struct hns_roce_ib_create_qp_resp *resp) { + bool has_sdb = user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd); struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata, struct hns_roce_ucontext, ibucontext); + bool has_rdb = user_qp_has_rdb(hr_dev, init_attr, udata, resp); struct ib_device *ibdev = &hr_dev->ib_dev; 
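/*
 * The hns_roce_qp.c hunks above factor "look up an object by number, take a
 * reference under the xarray lock, warn on bogus IDs" into one helper shared
 * by the QP event path and the new flush-CQE path; dropping the reference
 * completes the object's free completion so the destroy path can wait for
 * in-flight events. A sketch of that pattern with a hypothetical struct obj
 * (the struct and helper names are illustrative, not driver symbols).
 */
#include <linux/xarray.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <linux/device.h>

struct obj {
	u32 id;
	refcount_t refcount;
	struct completion free;	/* completed when the last reference drops */
};

/* Look up an object by id and take a reference, all under the xarray lock
 * with IRQs disabled, so an async event cannot race with teardown. */
static struct obj *obj_get(struct device *dev, struct xarray *xa, u32 id)
{
	struct obj *obj;
	unsigned long flags;

	xa_lock_irqsave(xa, flags);
	obj = xa_load(xa, id);
	if (obj)
		refcount_inc(&obj->refcount);
	xa_unlock_irqrestore(xa, flags);

	if (!obj)
		dev_warn(dev, "async event for bogus object %08x\n", id);

	return obj;
}

/* Drop the reference; the last put lets a waiter in the destroy path
 * proceed past wait_for_completion(&obj->free). */
static void obj_put(struct obj *obj)
{
	if (refcount_dec_and_test(&obj->refcount))
		complete(&obj->free);
}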
int ret; - if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) { + if (has_sdb) { ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb); if (ret) { ibdev_err(ibdev, @@ -924,7 +942,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB; } - if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) { + if (has_rdb) { ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb); if (ret) { ibdev_err(ibdev, @@ -938,7 +956,7 @@ static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, return 0; err_sdb: - if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) + if (has_sdb) hns_roce_db_unmap_user(uctx, &hr_qp->sdb); err_out: return ret; @@ -1123,7 +1141,6 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, } static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, - struct ib_pd *ib_pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata, struct hns_roce_qp *hr_qp) @@ -1196,7 +1213,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, min(udata->outlen, sizeof(resp))); if (ret) { ibdev_err(ibdev, "copy qp resp failed!\n"); - goto err_store; + goto err_flow_ctrl; } } @@ -1277,7 +1294,6 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr, struct ib_device *ibdev = qp->device; struct hns_roce_dev *hr_dev = to_hr_dev(ibdev); struct hns_roce_qp *hr_qp = to_hr_qp(qp); - struct ib_pd *pd = qp->pd; int ret; ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata); @@ -1292,7 +1308,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr, hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; } - ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp); + ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp); if (ret) ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n", init_attr->qp_type, ret); diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 727f92650071..80fcb1b0e8fd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -150,8 +150,8 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_SRQ, srq->srqn); if (ret) - dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n", - ret, srq->srqn); + dev_err_ratelimited(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n", + ret, srq->srqn); xa_erase_irq(&srq_table->xa, srq->srqn); @@ -249,7 +249,7 @@ static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev, hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr); } -static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) +static int alloc_srq_wrid(struct hns_roce_srq *srq) { srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL); if (!srq->wrid) @@ -365,7 +365,7 @@ static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, goto err_idx; if (!udata) { - ret = alloc_srq_wrid(hr_dev, srq); + ret = alloc_srq_wrid(srq); if (ret) goto err_wqe_buf; } diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index ba47874f90d3..7c3dc86ab7f0 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -384,10 +384,10 @@ static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context) } spin_unlock_bh(&iboe->lock); - if (!ret && hw_update) { + if (gids) ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num); - kfree(gids); - 
} + + kfree(gids); return ret; } diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 3e1272695d99..9915504ad1e1 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -444,6 +444,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, struct ib_qp *qp) { struct mlx5_ib_dev *dev = to_mdev(qp->device); + bool new = false; int err; if (!counter->id) { @@ -458,6 +459,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, return err; counter->id = MLX5_GET(alloc_q_counter_out, out, counter_set_id); + new = true; } err = mlx5_ib_qp_set_counter(qp, counter); @@ -467,8 +469,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter, return 0; fail_set_counter: - mlx5_ib_counter_dealloc(counter); - counter->id = 0; + if (new) { + mlx5_ib_counter_dealloc(counter); + counter->id = 0; + } return err; } diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index be189e0525de..a34bbe27bb76 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -484,7 +484,7 @@ repoll: } qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; - if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { + if (!*cur_qp || (qpn != (*cur_qp)->trans_qp.base.mqp.qpn)) { /* We do not have to take the QP table lock here, * because CQs will be locked while QPs are removed * from the table. diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index bce31e28eb30..45a414e8d35f 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3265,7 +3265,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev) list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list, list) { if (dev->sys_image_guid == mpi->sys_image_guid && - (mlx5_core_native_port_num(mpi->mdev) - 1) == i) { + (mlx5_core_native_port_num(mpi->mdev) - 1) == i && + mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) { bound = mlx5_ib_bind_slave_port(dev, mpi); } @@ -4280,7 +4281,8 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev, mutex_lock(&mlx5_ib_multiport_mutex); list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) { - if (dev->sys_image_guid == mpi->sys_image_guid) + if (dev->sys_image_guid == mpi->sys_image_guid && + mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) bound = mlx5_ib_bind_slave_port(dev, mpi); if (bound) { diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index d3cada2ae5a5..89682fe0747f 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -275,9 +275,6 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni, blk_start_idx = idx; in_block = 1; } - - /* Count page invalidations */ - invalidations += idx - blk_start_idx + 1; } else { u64 umr_offset = idx & umr_block_mask; @@ -287,14 +284,19 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); in_block = 0; + /* Count page invalidations */ + invalidations += idx - blk_start_idx + 1; } } } - if (in_block) + if (in_block) { mlx5r_umr_update_xlt(mr, blk_start_idx, idx - blk_start_idx + 1, 0, MLX5_IB_UPD_XLT_ZAP | MLX5_IB_UPD_XLT_ATOMIC); + /* Count page invalidations */ + invalidations += idx - blk_start_idx + 1; + } mlx5_update_odp_stats(mr, invalidations, invalidations); @@ -808,8 +810,7 @@ static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key) /* * Handle a single data segment in a page-fault WQE or 
RDMA region. * - * Returns number of OS pages retrieved on success. The caller may continue to - * the next data segment. + * Returns zero on success. The caller may continue to the next data segment. * Can return the following error codes: * -EAGAIN to designate a temporary error. The caller will abort handling the * page fault and resolve it. @@ -822,7 +823,7 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, u32 *bytes_committed, u32 *bytes_mapped) { - int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0; + int ret, i, outlen, cur_outlen = 0, depth = 0, pages_in_range; struct pf_frame *head = NULL, *frame; struct mlx5_ib_mkey *mmkey; struct mlx5_ib_mr *mr; @@ -865,13 +866,20 @@ next_mr: case MLX5_MKEY_MR: mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); + pages_in_range = (ALIGN(io_virt + bcnt, PAGE_SIZE) - + (io_virt & PAGE_MASK)) >> + PAGE_SHIFT; ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0, false); if (ret < 0) goto end; mlx5_update_odp_stats(mr, faults, ret); - npages += ret; + if (ret < pages_in_range) { + ret = -EFAULT; + goto end; + } + ret = 0; break; @@ -962,7 +970,7 @@ end: kfree(out); *bytes_committed = 0; - return ret ? ret : npages; + return ret; } /* @@ -981,8 +989,7 @@ end: * the committed bytes). * @receive_queue: receive WQE end of sg list * - * Returns the number of pages loaded if positive, zero for an empty WQE, or a - * negative error code. + * Returns zero for success or a negative error code. */ static int pagefault_data_segments(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault, @@ -990,7 +997,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev, void *wqe_end, u32 *bytes_mapped, u32 *total_wqe_bytes, bool receive_queue) { - int ret = 0, npages = 0; + int ret = 0; u64 io_virt; u32 key; u32 byte_count; @@ -1046,10 +1053,9 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev, bytes_mapped); if (ret < 0) break; - npages += ret; } - return ret < 0 ? ret : npages; + return ret; } /* @@ -1285,12 +1291,6 @@ resolve_page_fault: free_page((unsigned long)wqe_start); } -static int pages_in_range(u64 address, u32 length) -{ - return (ALIGN(address + length, PAGE_SIZE) - - (address & PAGE_MASK)) >> PAGE_SHIFT; -} - static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev, struct mlx5_pagefault *pfault) { @@ -1329,7 +1329,7 @@ static void mlx5_ib_mr_rdma_pfault_handler(struct mlx5_ib_dev *dev, if (ret == -EAGAIN) { /* We're racing with an invalidation, don't prefetch */ prefetch_activated = 0; - } else if (ret < 0 || pages_in_range(address, length) > ret) { + } else if (ret < 0) { mlx5_ib_page_fault_resume(dev, pfault, 1); if (ret != -ENOENT) mlx5_ib_dbg(dev, "PAGE FAULT error %d. 
QP 0x%x, type: 0x%x\n", diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 8d132b726c64..d782a494abcd 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -4466,6 +4466,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr, set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1); MLX5_SET(dctc, dctc, counter_set_id, set_id); + + qp->port = attr->port_num; } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { struct mlx5_ib_modify_qp_resp resp = {}; u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {}; @@ -4955,7 +4957,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp, } if (qp_attr_mask & IB_QP_PORT) - qp_attr->port_num = MLX5_GET(dctc, dctc, port); + qp_attr->port_num = mqp->port; if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak); if (qp_attr_mask & IB_QP_AV) { diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 182a89bb24ef..caade796bc3c 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -55,6 +55,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry, struct inode *inode = new_inode(dir->i_sb); if (!inode) { + dput(dentry); error = -EPERM; goto bail; } diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index 46653ad56f5a..ec2d8f78c898 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -380,7 +380,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev) if (!us_ibdev) { usnic_err("Device %s context alloc failed\n", netdev_name(pci_get_drvdata(dev))); - return ERR_PTR(-EFAULT); + return NULL; } us_ibdev->ufdev = usnic_fwd_dev_alloc(dev); @@ -500,8 +500,8 @@ static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic) } us_ibdev = usnic_ib_device_add(parent_pci); - if (IS_ERR_OR_NULL(us_ibdev)) { - us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT); + if (!us_ibdev) { + us_ibdev = ERR_PTR(-EFAULT); goto out; } @@ -569,10 +569,10 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev, } pf = usnic_ib_discover_pf(vf->vnic); - if (IS_ERR_OR_NULL(pf)) { - usnic_err("Failed to discover pf of vnic %s with err%ld\n", - pci_name(pdev), PTR_ERR(pf)); - err = pf ? PTR_ERR(pf) : -EFAULT; + if (IS_ERR(pf)) { + err = PTR_ERR(pf); + usnic_err("Failed to discover pf of vnic %s with err%d\n", + pci_name(pdev), err); goto out_clean_vnic; } diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index b1a0ab3cd4bd..43dfc6fd8a3e 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -71,11 +71,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, err = do_mmap_info(rxe, uresp ? 
&uresp->mi : NULL, udata, cq->queue->buf, cq->queue->buf_size, &cq->queue->ip); - if (err) { - vfree(cq->queue->buf); - kfree(cq->queue); + if (err) return err; - } cq->is_user = uresp; diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c index 1151c0b5ccea..02cf5e851eb0 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.c +++ b/drivers/infiniband/sw/rxe/rxe_pool.c @@ -221,7 +221,6 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable) { struct rxe_pool *pool = elem->pool; struct xarray *xa = &pool->xa; - static int timeout = RXE_POOL_TIMEOUT; int ret, err = 0; void *xa_ret; @@ -245,19 +244,19 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable) * return to rdma-core */ if (sleepable) { - if (!completion_done(&elem->complete) && timeout) { + if (!completion_done(&elem->complete)) { ret = wait_for_completion_timeout(&elem->complete, - timeout); + msecs_to_jiffies(50000)); /* Shouldn't happen. There are still references to * the object but, rather than deadlock, free the * object or pass back to rdma-core. */ if (WARN_ON(!ret)) - err = -EINVAL; + err = -ETIMEDOUT; } } else { - unsigned long until = jiffies + timeout; + unsigned long until = jiffies + RXE_POOL_TIMEOUT; /* AH objects are unique in that the destroy_ah verb * can be called in atomic context. This delay @@ -269,7 +268,7 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable) mdelay(1); if (WARN_ON(!completion_done(&elem->complete))) - err = -EINVAL; + err = -ETIMEDOUT; } if (pool->cleanup) diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 35768fdbd5b7..1ef5819e75ff 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -643,13 +643,15 @@ int rxe_requester(void *arg) if (unlikely(qp->req.state == QP_STATE_ERROR)) { wqe = req_next_wqe(qp); - if (wqe) + if (wqe) { /* * Generate an error completion for error qp state */ + wqe->status = IB_WC_WR_FLUSH_ERR; goto err; - else + } else { goto exit; + } } if (unlikely(qp->req.state == QP_STATE_RESET)) { diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c index a70ccb4d4c85..8b3b9b798676 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c @@ -344,6 +344,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, struct rtrs_srv_mr *srv_mr; bool need_inval = false; enum ib_send_flags flags; + struct ib_sge list; u32 imm; int err; @@ -396,7 +397,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval); imm_wr.wr.next = NULL; if (always_invalidate) { - struct ib_sge list; struct rtrs_msg_rkey_rsp *msg; srv_mr = &srv_path->mrs[id->msg_id]; diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index c4dcef76e964..8df23ab974c1 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -3983,7 +3983,6 @@ static struct srp_host *srp_add_port(struct srp_device *device, u32 port) return host; put_host: - device_del(&host->dev); put_device(&host->dev); return NULL; } diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 13c36f51b935..872381221e75 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -145,6 +145,7 @@ static const struct xpad_device { { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 }, { 0x045e, 0x028f, "Microsoft X-Box 
360 pad v2", 0, XTYPE_XBOX360 }, { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, + { 0x045e, 0x02a9, "Xbox 360 Wireless Receiver (Unofficial)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE }, { 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE }, { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", MAP_PADDLES, XTYPE_XBOXONE }, @@ -366,6 +367,7 @@ static const struct xpad_device { { 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1220, "Wooting Two HE", 0, XTYPE_XBOX360 }, + { 0x31e3, 0x1230, "Wooting Two HE (ARM)", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1300, "Wooting 60HE (AVR)", 0, XTYPE_XBOX360 }, { 0x31e3, 0x1310, "Wooting 60HE (ARM)", 0, XTYPE_XBOX360 }, { 0x3285, 0x0607, "Nacon GC-100", 0, XTYPE_XBOX360 }, diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 661d6c8b059b..b3a856333d4e 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -89,7 +89,7 @@ static const unsigned short atkbd_set2_keycode[ATKBD_KEYMAP_SIZE] = { 0, 46, 45, 32, 18, 5, 4, 95, 0, 57, 47, 33, 20, 19, 6,183, 0, 49, 48, 35, 34, 21, 7,184, 0, 0, 50, 36, 22, 8, 9,185, 0, 51, 37, 23, 24, 11, 10, 0, 0, 52, 53, 38, 39, 25, 12, 0, - 0, 89, 40, 0, 26, 13, 0, 0, 58, 54, 28, 27, 0, 43, 0, 85, + 0, 89, 40, 0, 26, 13, 0,193, 58, 54, 28, 27, 0, 43, 0, 85, 0, 86, 91, 90, 92, 0, 14, 94, 0, 79,124, 75, 71,121, 0, 0, 82, 83, 80, 76, 77, 72, 1, 69, 87, 78, 81, 74, 55, 73, 70, 99, diff --git a/drivers/input/keyboard/mtk-pmic-keys.c b/drivers/input/keyboard/mtk-pmic-keys.c index 9b34da0ec260..2d8999023468 100644 --- a/drivers/input/keyboard/mtk-pmic-keys.c +++ b/drivers/input/keyboard/mtk-pmic-keys.c @@ -133,8 +133,8 @@ static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys, u32 value, mask; int error; - kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs; - kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs; + kregs_home = ®s->keys_regs[MTK_PMIC_HOMEKEY_INDEX]; + kregs_pwr = ®s->keys_regs[MTK_PMIC_PWRKEY_INDEX]; error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec", &long_press_debounce); diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index d8c90a23a101..6cc66774c107 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -163,6 +163,7 @@ static const char * const topbuttonpad_pnp_ids[] = { static const char * const smbus_pnp_ids[] = { /* all of the topbuttonpad_pnp_ids are valid, we just add some extras */ + "DLL060d", /* Dell Precision M3800 */ "LEN0048", /* X1 Carbon 3 */ "LEN0046", /* X250 */ "LEN0049", /* Yoga 11e */ @@ -189,11 +190,15 @@ static const char * const smbus_pnp_ids[] = { "LEN2054", /* E480 */ "LEN2055", /* E580 */ "LEN2068", /* T14 Gen 1 */ + "SYN1221", /* TUXEDO InfinityBook Pro 14 v5 */ + "SYN3003", /* HP EliteBook 850 G1 */ "SYN3015", /* HP EliteBook 840 G2 */ "SYN3052", /* HP EliteBook 840 G4 */ "SYN3221", /* HP 15-ay000 */ "SYN323d", /* HP Spectre X360 13-w013dx */ "SYN3257", /* HP Envy 13-ad105ng */ + "TOS01f6", /* Dynabook Portege X30L-G */ + "TOS0213", /* Dynabook Portege X30-D */ NULL }; diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index 34d1f07ea4c3..8813db7eec39 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -1080,16 +1080,14 @@ static const struct dmi_system_id 
i8042_dmi_quirk_table[] __initconst = { DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"), DMI_MATCH(DMI_BOARD_NAME, "AURA1501"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "TUXEDO"), DMI_MATCH(DMI_BOARD_NAME, "EDUBOOK1502"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* Mivvy M310 */ @@ -1159,9 +1157,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { }, /* * A lot of modern Clevo barebones have touchpad and/or keyboard issues - * after suspend fixable with nomux + reset + noloop + nopnp. Luckily, - * none of them have an external PS/2 port so this can safely be set for - * all of them. + * after suspend fixable with the forcenorestore quirk. * Clevo barebones come with board_vendor and/or system_vendor set to * either the very generic string "Notebook" and/or a different value * for each individual reseller. The only somewhat universal way to @@ -1171,29 +1167,25 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "LAPQC71A"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "LAPQC71B"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "N140CU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "N141CU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { @@ -1205,29 +1197,19 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { - /* - * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes - * the keyboard very laggy for ~5 seconds after boot and - * sometimes also after resume. - * However both are required for the keyboard to not fail - * completely sometimes after boot or resume. 
- */ .matches = { DMI_MATCH(DMI_BOARD_NAME, "NHxxRZQ"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, /* * At least one modern Clevo barebone has the touchpad connected both @@ -1243,17 +1225,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NS50MU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX | - SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | - SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_NOAUX | + SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NS50_70MU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOAUX | SERIO_QUIRK_NOMUX | - SERIO_QUIRK_RESET_ALWAYS | SERIO_QUIRK_NOLOOP | - SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_NOAUX | + SERIO_QUIRK_FORCENORESTORE) }, { .matches = { @@ -1265,8 +1245,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "NJ50_70CU"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "P640RE"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1277,16 +1262,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65xH"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1297,8 +1280,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65_P67H"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1309,8 +1291,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RP"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1321,8 +1302,7 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P65_67RS"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* @@ -1333,22 +1313,43 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "P67xRP"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void 
*)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "PB50_70DFx,DDx"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PB51RF"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PB71RD"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PC70DR"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "PCX0DX"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "PCX0DX_GN20"), + }, + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, /* See comment on TUXEDO InfinityBook S17 Gen6 / Clevo NS70MU above */ { @@ -1361,15 +1362,13 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "X170SM"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { .matches = { DMI_MATCH(DMI_BOARD_NAME, "X170KM-G"), }, - .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS | - SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP) + .driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE) }, { /* diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index c9598c506ff9..bc78e8665551 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3610,6 +3610,14 @@ found: while (*uid == '0' && *(uid + 1)) uid++; + if (strlen(hid) >= ACPIHID_HID_LEN) { + pr_err("Invalid command line: hid is too long\n"); + return 1; + } else if (strlen(uid) >= ACPIHID_UID_LEN) { + pr_err("Invalid command line: uid is too long\n"); + return 1; + } + i = early_acpihid_map_size++; memcpy(early_acpihid_map[i].hid, hid, strlen(hid)); memcpy(early_acpihid_map[i].uid, uid, strlen(uid)); diff --git a/drivers/iommu/amd/io_pgtable_v2.c b/drivers/iommu/amd/io_pgtable_v2.c index 232d17bd941f..c86cbbc21e88 100644 --- a/drivers/iommu/amd/io_pgtable_v2.c +++ b/drivers/iommu/amd/io_pgtable_v2.c @@ -264,7 +264,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova, map_size = get_alloc_page_size(pgsize); pte = v2_alloc_pte(pdom->iop.pgd, iova, map_size, &updated); if (!pte) { - ret = -EINVAL; + ret = -ENOMEM; goto out; } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 5d34416b3468..4421b464947b 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3589,7 +3589,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) * we should not modify the IRTE */ if (!dev_data || !dev_data->use_vapic) - return 0; + return -EINVAL; ir_data->cfg = irqd_cfg(data); pi_data->ir_data = ir_data; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 45b43f729f89..6a60bad48b27 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1443,26 +1443,37 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) return 0; } +static int arm_smmu_streams_cmp_key(const void *lhs, const struct rb_node *rhs) +{ + struct 
arm_smmu_stream *stream_rhs =
+               rb_entry(rhs, struct arm_smmu_stream, node);
+       const u32 *sid_lhs = lhs;
+
+       if (*sid_lhs < stream_rhs->id)
+               return -1;
+       if (*sid_lhs > stream_rhs->id)
+               return 1;
+       return 0;
+}
+
+static int arm_smmu_streams_cmp_node(struct rb_node *lhs,
+                                    const struct rb_node *rhs)
+{
+       return arm_smmu_streams_cmp_key(
+               &rb_entry(lhs, struct arm_smmu_stream, node)->id, rhs);
+}
+
 static struct arm_smmu_master *
 arm_smmu_find_master(struct arm_smmu_device *smmu, u32 sid)
 {
        struct rb_node *node;
-       struct arm_smmu_stream *stream;

        lockdep_assert_held(&smmu->streams_mutex);

-       node = smmu->streams.rb_node;
-       while (node) {
-               stream = rb_entry(node, struct arm_smmu_stream, node);
-               if (stream->id < sid)
-                       node = node->rb_right;
-               else if (stream->id > sid)
-                       node = node->rb_left;
-               else
-                       return stream->master;
-       }
-
-       return NULL;
+       node = rb_find(&sid, &smmu->streams, arm_smmu_streams_cmp_key);
+       if (!node)
+               return NULL;
+       return rb_entry(node, struct arm_smmu_stream, node)->master;
 }

 /* IRQ and event handlers */
@@ -2590,8 +2601,6 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
 {
        int i;
        int ret = 0;
-       struct arm_smmu_stream *new_stream, *cur_stream;
-       struct rb_node **new_node, *parent_node = NULL;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(master->dev);

        master->streams = kcalloc(fwspec->num_ids, sizeof(*master->streams),
@@ -2602,9 +2611,10 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,

        mutex_lock(&smmu->streams_mutex);
        for (i = 0; i < fwspec->num_ids; i++) {
+               struct arm_smmu_stream *new_stream = &master->streams[i];
+               struct rb_node *existing;
                u32 sid = fwspec->ids[i];

-               new_stream = &master->streams[i];
                new_stream->id = sid;
                new_stream->master = master;

@@ -2613,28 +2623,23 @@ static int arm_smmu_insert_master(struct arm_smmu_device *smmu,
                        break;

                /* Insert into SID tree */
-               new_node = &(smmu->streams.rb_node);
-               while (*new_node) {
-                       cur_stream = rb_entry(*new_node, struct arm_smmu_stream,
-                                             node);
-                       parent_node = *new_node;
-                       if (cur_stream->id > new_stream->id) {
-                               new_node = &((*new_node)->rb_left);
-                       } else if (cur_stream->id < new_stream->id) {
-                               new_node = &((*new_node)->rb_right);
-                       } else {
-                               dev_warn(master->dev,
-                                        "stream %u already in tree\n",
-                                        cur_stream->id);
-                               ret = -EINVAL;
-                               break;
-                       }
-               }
-               if (ret)
-                       break;
+               existing = rb_find_add(&new_stream->node, &smmu->streams,
+                                      arm_smmu_streams_cmp_node);
+               if (existing) {
+                       struct arm_smmu_master *existing_master =
+                               rb_entry(existing, struct arm_smmu_stream, node)
+                                       ->master;

-               rb_link_node(&new_stream->node, parent_node, new_node);
-               rb_insert_color(&new_stream->node, &smmu->streams);
+                       /* Bridged PCI devices may end up with duplicated IDs */
+                       if (existing_master == master)
+                               continue;
+
+                       dev_warn(master->dev,
+                                "stream %u already in tree from dev %s\n", sid,
+                                dev_name(existing_master->dev));
+                       ret = -EINVAL;
+                       break;
+               }
        }

        if (ret) {
@@ -3880,7 +3885,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        /* Initialise in-memory data structures */
        ret = arm_smmu_init_structures(smmu);
        if (ret)
-               return ret;
+               goto err_free_iopf;

        /* Record our private device structure */
        platform_set_drvdata(pdev, smmu);
@@ -3891,22 +3896,29 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        /* Reset the device */
        ret = arm_smmu_device_reset(smmu, bypass);
        if (ret)
-               return ret;
+               goto err_disable;

        /* And we're up. Go go go!
*/ ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "smmu3.%pa", &ioaddr); if (ret) - return ret; + goto err_disable; ret = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev); if (ret) { dev_err(dev, "Failed to register iommu\n"); - iommu_device_sysfs_remove(&smmu->iommu); - return ret; + goto err_free_sysfs; } return 0; + +err_free_sysfs: + iommu_device_sysfs_remove(&smmu->iommu); +err_disable: + arm_smmu_device_disable(smmu); +err_free_iopf: + iopf_queue_free(smmu->evtq.iopf); + return ret; } static int arm_smmu_device_remove(struct platform_device *pdev) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index f38b54a88767..294120049750 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1363,6 +1363,17 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) goto out_free; } else if (fwspec && fwspec->ops == &arm_smmu_ops) { smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); + + /* + * Defer probe if the relevant SMMU instance hasn't finished + * probing yet. This is a fragile hack and we'd ideally + * avoid this race in the core code. Until that's ironed + * out, however, this is the most pragmatic option on the + * table. + */ + if (!smmu) + return ERR_PTR(dev_err_probe(dev, -EPROBE_DEFER, + "smmu dev has not bound yet\n")); } else { return ERR_PTR(-ENODEV); } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 3fa66dba0a32..cbf9ec320691 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -1661,7 +1661,7 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) static DEFINE_MUTEX(msi_prepare_lock); /* see below */ if (!domain || !domain->iova_cookie) { - desc->iommu_cookie = NULL; + msi_desc_set_iommu_msi_iova(desc, 0, 0); return 0; } @@ -1673,11 +1673,12 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) mutex_lock(&msi_prepare_lock); msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain); mutex_unlock(&msi_prepare_lock); - - msi_desc_set_iommu_cookie(desc, msi_page); - if (!msi_page) return -ENOMEM; + + msi_desc_set_iommu_msi_iova( + desc, msi_page->iova, + ilog2(cookie_msi_granule(domain->iova_cookie))); return 0; } @@ -1688,18 +1689,15 @@ int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr) */ void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg) { - struct device *dev = msi_desc_to_dev(desc); - const struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - const struct iommu_dma_msi_page *msi_page; +#ifdef CONFIG_IRQ_MSI_IOMMU + if (desc->iommu_msi_shift) { + u64 msi_iova = desc->iommu_msi_iova << desc->iommu_msi_shift; - msi_page = msi_desc_get_iommu_cookie(desc); - - if (!domain || !domain->iova_cookie || WARN_ON(!msi_page)) - return; - - msg->address_hi = upper_32_bits(msi_page->iova); - msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1; - msg->address_lo += lower_32_bits(msi_page->iova); + msg->address_hi = upper_32_bits(msi_iova); + msg->address_lo = lower_32_bits(msi_iova) | + (msg->address_lo & ((1 << desc->iommu_msi_shift) - 1)); + } +#endif } static int iommu_dma_init(void) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index f118c8991130..5847ac29d976 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -831,14 +831,15 @@ static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, while (1) { offset = pfn_level_offset(pfn, level); pte = &parent[offset]; - 
if (!pte || (dma_pte_superpage(pte) || !dma_pte_present(pte))) { - pr_info("PTE not present at level %d\n", level); - break; - } pr_info("pte level: %d, pte value: 0x%016llx\n", level, pte->val); - if (level == 1) + if (!dma_pte_present(pte)) { + pr_info("page table not present at level %d\n", level - 1); + break; + } + + if (level == 1 || dma_pte_superpage(pte)) break; parent = phys_to_virt(dma_pte_addr(pte)); @@ -861,11 +862,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr); /* root entry dump */ - rt_entry = &iommu->root_entry[bus]; - if (!rt_entry) { - pr_info("root table entry is not present\n"); + if (!iommu->root_entry) { + pr_info("root table is not present\n"); return; } + rt_entry = &iommu->root_entry[bus]; if (sm_supported(iommu)) pr_info("scalable mode root entry: hi 0x%016llx, low 0x%016llx\n", @@ -876,7 +877,7 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, /* context entry dump */ ctx_entry = iommu_context_addr(iommu, bus, devfn, 0); if (!ctx_entry) { - pr_info("context table entry is not present\n"); + pr_info("context table is not present\n"); return; } @@ -885,17 +886,23 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, /* legacy mode does not require PASID entries */ if (!sm_supported(iommu)) { + if (!context_present(ctx_entry)) { + pr_info("legacy mode page table is not present\n"); + return; + } level = agaw_to_level(ctx_entry->hi & 7); pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); goto pgtable_walk; } - /* get the pointer to pasid directory entry */ - dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); - if (!dir) { - pr_info("pasid directory entry is not present\n"); + if (!context_present(ctx_entry)) { + pr_info("pasid directory table is not present\n"); return; } + + /* get the pointer to pasid directory entry */ + dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK); + /* For request-without-pasid, get the pasid from context entry */ if (intel_iommu_sm && pasid == INVALID_IOASID) pasid = PASID_RID2PASID; @@ -907,7 +914,7 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, /* get the pointer to the pasid table entry */ entries = get_pasid_table_from_pde(pde); if (!entries) { - pr_info("pasid table entry is not present\n"); + pr_info("pasid table is not present\n"); return; } index = pasid & PASID_PTE_MASK; @@ -915,6 +922,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id, for (i = 0; i < ARRAY_SIZE(pte->val); i++) pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]); + if (!pasid_pte_is_present(pte)) { + pr_info("scalable mode page table is not present\n"); + return; + } + if (pasid_pte_get_pgtt(pte) == PASID_ENTRY_PGTT_FL_ONLY) { level = pte->val[2] & BIT_ULL(2) ? 
5 : 4; pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK); @@ -4824,6 +4836,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_igfx); +/* QM57/QS57 integrated gfx malfunctions with dmar */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_iommu_igfx); + /* Broadwell igfx malfunctions with dmar */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1606, quirk_iommu_igfx); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x160B, quirk_iommu_igfx); @@ -4901,7 +4916,6 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev) } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 1e38a24eb71c..d2134a2014a8 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -180,6 +180,18 @@ static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte, return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4); } +/* + * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into + * a concatenated PGD, into the maximum number of entries that can be + * mapped in the same table page. + */ +static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data) +{ + int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data); + + return ptes_per_table - (i & (ptes_per_table - 1)); +} + static bool selftest_running = false; static dma_addr_t __arm_lpae_dma_addr(void *pages) @@ -357,7 +369,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, /* If we can install a leaf entry at this level, then do so */ if (size == block_size) { - max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start; + max_entries = arm_lpae_max_entries(map_idx_start, data); num_entries = min_t(int, pgcount, max_entries); ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep); if (!ret && mapped) @@ -564,7 +576,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, if (size == split_sz) { unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data); - max_entries = ptes_per_table - unmap_idx_start; + max_entries = arm_lpae_max_entries(unmap_idx_start, data); num_entries = min_t(int, pgcount, max_entries); } @@ -622,7 +634,7 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, /* If the size matches this level, we're in the right place */ if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) { - max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start; + max_entries = arm_lpae_max_entries(unmap_idx_start, data); num_entries = min_t(int, pgcount, max_entries); while (i < num_entries) { diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 576163f88a4a..d4cb09b2e267 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -1268,15 +1268,6 @@ static int mtk_iommu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); mutex_init(&data->mutex); - ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, - "mtk-iommu.%pa", &ioaddr); - if (ret) - goto out_link_remove; - - ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev); - if (ret) 
- goto out_sysfs_remove; - if (MTK_IOMMU_HAS_FLAG(data->plat_data, SHARE_PGTABLE)) { list_add_tail(&data->list, data->plat_data->hw_list); data->hw_list = data->plat_data->hw_list; @@ -1286,19 +1277,28 @@ static int mtk_iommu_probe(struct platform_device *pdev) data->hw_list = &data->hw_list_head; } + ret = iommu_device_sysfs_add(&data->iommu, dev, NULL, + "mtk-iommu.%pa", &ioaddr); + if (ret) + goto out_list_del; + + ret = iommu_device_register(&data->iommu, &mtk_iommu_ops, dev); + if (ret) + goto out_sysfs_remove; + if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) { ret = component_master_add_with_match(dev, &mtk_iommu_com_ops, match); if (ret) - goto out_list_del; + goto out_device_unregister; } return ret; -out_list_del: - list_del(&data->list); +out_device_unregister: iommu_device_unregister(&data->iommu); out_sysfs_remove: iommu_device_sysfs_remove(&data->iommu); -out_link_remove: +out_list_del: + list_del(&data->list); if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) device_link_remove(data->smicomm_dev, dev); out_runtime_disable: diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c index 1c2813ad8bbe..ba1e4a90ab5f 100644 --- a/drivers/irqchip/irq-apple-aic.c +++ b/drivers/irqchip/irq-apple-aic.c @@ -555,7 +555,8 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs) AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT)); } - if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) { + if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) == + (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) { int irq; if (cpumask_test_cpu(smp_processor_id(), &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff)) diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c index 414cd925064f..c04f2481068b 100644 --- a/drivers/irqchip/irq-gic-v2m.c +++ b/drivers/irqchip/irq-gic-v2m.c @@ -262,7 +262,7 @@ static struct msi_domain_info gicv2m_pmsi_domain_info = { .chip = &gicv2m_pmsi_irq_chip, }; -static void gicv2m_teardown(void) +static void __init gicv2m_teardown(void) { struct v2m_data *v2m, *tmp; @@ -277,7 +277,7 @@ static void gicv2m_teardown(void) } } -static int gicv2m_allocate_domains(struct irq_domain *parent) +static __init int gicv2m_allocate_domains(struct irq_domain *parent) { struct irq_domain *inner_domain, *pci_domain, *plat_domain; struct v2m_data *v2m; @@ -404,7 +404,7 @@ err_free_v2m: return ret; } -static const struct of_device_id gicv2m_device_id[] = { +static __initconst struct of_device_id gicv2m_device_id[] = { { .compatible = "arm,gic-v2m-frame", }, {}, }; @@ -469,7 +469,7 @@ static struct fwnode_handle *gicv2m_get_fwnode(struct device *dev) return data->fwnode; } -static bool acpi_check_amazon_graviton_quirks(void) +static __init bool acpi_check_amazon_graviton_quirks(void) { static struct acpi_table_madt *madt; acpi_status status; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index b9cd9f8d43b4..59af25999fce 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -1968,7 +1968,7 @@ static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) if (!is_v4(its_dev->its)) return -EINVAL; - guard(raw_spinlock_irq)(&its_dev->event_map.vlpi_lock); + guard(raw_spinlock)(&its_dev->event_map.vlpi_lock); /* Unmap request? 
*/
        if (!info)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 81bf812ed327..7e8a57d8349d 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1454,7 +1454,7 @@ static int gic_retrigger(struct irq_data *data)
 static int gic_cpu_pm_notifier(struct notifier_block *self,
                               unsigned long cmd, void *v)
 {
-       if (cmd == CPU_PM_EXIT) {
+       if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) {
                if (gic_dist_security_disabled())
                        gic_enable_redist(true);
                gic_cpu_sys_reg_init();
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 9346b855709d..67b7da993d2c 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -67,7 +67,7 @@ static void gic_check_cpu_features(void)

 union gic_base {
        void __iomem *common_base;
-       void __percpu * __iomem *percpu_base;
+       void __iomem * __percpu *percpu_base;
 };

 struct gic_chip_data {
diff --git a/drivers/irqchip/irq-qcom-mpm.c b/drivers/irqchip/irq-qcom-mpm.c
index d30614661eea..775ab17e51f9 100644
--- a/drivers/irqchip/irq-qcom-mpm.c
+++ b/drivers/irqchip/irq-qcom-mpm.c
@@ -226,6 +226,9 @@ static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
        if (ret)
                return ret;

+       if (pin == GPIO_NO_WAKE_IRQ)
+               return irq_domain_disconnect_hierarchy(domain, virq);
+
        ret = irq_domain_set_hwirq_and_chip(domain, virq, pin, &qcom_mpm_chip, priv);
        if (ret)
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 21d49791f855..83c7417611fa 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -187,7 +187,8 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
        gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
        gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
        gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
-       gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+       gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
+                                      IRQCHIP_SKIP_SET_WAKE;
        gc->chip_types[0].regs.ack = reg_offs->pend;
        gc->chip_types[0].regs.mask = reg_offs->enable;
        gc->chip_types[0].regs.type = reg_offs->ctrl;
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 7899607fbee8..66e5163a3444 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -35,11 +35,10 @@ void __init irqchip_init(void)
 int platform_irqchip_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
-       struct device_node *par_np = of_irq_find_parent(np);
+       struct device_node *par_np __free(device_node) = of_irq_find_parent(np);
        of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);

        if (!irq_init_cb) {
-               of_node_put(par_np);
                return -EINVAL;
        }

@@ -55,7 +54,6 @@ int platform_irqchip_probe(struct platform_device *pdev)
         * interrupt controller can check for specific domains as necessary.
*/ if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) { - of_node_put(par_np); return -EPROBE_DEFER; } diff --git a/drivers/leds/flash/leds-mt6360.c b/drivers/leds/flash/leds-mt6360.c index 2fab335a6425..71c1ddd71f8e 100644 --- a/drivers/leds/flash/leds-mt6360.c +++ b/drivers/leds/flash/leds-mt6360.c @@ -797,7 +797,6 @@ static void mt6360_v4l2_flash_release(struct mt6360_priv *priv) static int mt6360_led_probe(struct platform_device *pdev) { struct mt6360_priv *priv; - struct fwnode_handle *child; size_t count; int i = 0, ret; @@ -824,7 +823,7 @@ static int mt6360_led_probe(struct platform_device *pdev) return -ENODEV; } - device_for_each_child_node(&pdev->dev, child) { + device_for_each_child_node_scoped(&pdev->dev, child) { struct mt6360_led *led = priv->leds + i; struct led_init_data init_data = { .fwnode = child, }; u32 reg, led_color; diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 7391d2cf1370..93fdca5c7dc5 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -28,11 +28,14 @@ static ssize_t brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); + unsigned int brightness; - /* no lock needed for this */ + mutex_lock(&led_cdev->led_access); led_update_brightness(led_cdev); + brightness = led_cdev->brightness; + mutex_unlock(&led_cdev->led_access); - return sprintf(buf, "%u\n", led_cdev->brightness); + return sprintf(buf, "%u\n", brightness); } static ssize_t brightness_store(struct device *dev, @@ -69,8 +72,13 @@ static ssize_t max_brightness_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); + unsigned int max_brightness; - return sprintf(buf, "%u\n", led_cdev->max_brightness); + mutex_lock(&led_cdev->led_access); + max_brightness = led_cdev->max_brightness; + mutex_unlock(&led_cdev->led_access); + + return sprintf(buf, "%u\n", max_brightness); } static DEVICE_ATTR_RO(max_brightness); diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index 9fdfc1b9a1a0..149a0e4d6f07 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -580,9 +580,6 @@ static int lp55xx_parse_common_child(struct device_node *np, if (ret) return ret; - if (*chan_nr < 0 || *chan_nr > cfg->max_channel) - return -EINVAL; - return 0; } diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c index e2b36d3187eb..b7da47333cd8 100644 --- a/drivers/leds/leds-lp8860.c +++ b/drivers/leds/leds-lp8860.c @@ -267,7 +267,7 @@ static int lp8860_init(struct lp8860_led *led) goto out; } - reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs) / sizeof(lp8860_eeprom_disp_regs[0]); + reg_count = ARRAY_SIZE(lp8860_eeprom_disp_regs); for (i = 0; i < reg_count; i++) { ret = regmap_write(led->eeprom_regmap, lp8860_eeprom_disp_regs[i].reg, diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c index b7855c93bd72..31eca8394a26 100644 --- a/drivers/leds/leds-mlxreg.c +++ b/drivers/leds/leds-mlxreg.c @@ -258,6 +258,7 @@ static int mlxreg_led_probe(struct platform_device *pdev) { struct mlxreg_core_platform_data *led_pdata; struct mlxreg_led_priv_data *priv; + int err; led_pdata = dev_get_platdata(&pdev->dev); if (!led_pdata) { @@ -269,28 +270,21 @@ static int mlxreg_led_probe(struct platform_device *pdev) if (!priv) return -ENOMEM; - mutex_init(&priv->access_lock); + err = devm_mutex_init(&pdev->dev, &priv->access_lock); + if (err) + return err; + priv->pdev 
= pdev; priv->pdata = led_pdata; return mlxreg_led_config(priv); } -static int mlxreg_led_remove(struct platform_device *pdev) -{ - struct mlxreg_led_priv_data *priv = dev_get_drvdata(&pdev->dev); - - mutex_destroy(&priv->access_lock); - - return 0; -} - static struct platform_driver mlxreg_led_driver = { .driver = { .name = "leds-mlxreg", }, .probe = mlxreg_led_probe, - .remove = mlxreg_led_remove, }; module_platform_driver(mlxreg_led_driver); diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c index 77213b79f84d..6692de0af68f 100644 --- a/drivers/leds/leds-netxbig.c +++ b/drivers/leds/leds-netxbig.c @@ -440,6 +440,7 @@ static int netxbig_leds_get_of_pdata(struct device *dev, } gpio_ext_pdev = of_find_device_by_node(gpio_ext_np); if (!gpio_ext_pdev) { + of_node_put(gpio_ext_np); dev_err(dev, "Failed to find platform device for gpio-ext\n"); return -ENODEV; } diff --git a/drivers/leds/rgb/leds-pwm-multicolor.c b/drivers/leds/rgb/leds-pwm-multicolor.c index da9d2218ae18..97aa06e2ff60 100644 --- a/drivers/leds/rgb/leds-pwm-multicolor.c +++ b/drivers/leds/rgb/leds-pwm-multicolor.c @@ -135,8 +135,11 @@ static int led_pwm_mc_probe(struct platform_device *pdev) /* init the multicolor's LED class device */ cdev = &priv->mc_cdev.led_cdev; - fwnode_property_read_u32(mcnode, "max-brightness", + ret = fwnode_property_read_u32(mcnode, "max-brightness", &cdev->max_brightness); + if (ret) + goto release_mcnode; + cdev->flags = LED_CORE_SUSPENDRESUME; cdev->brightness_set_blocking = led_pwm_mc_set; diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c index 0ec21dcdbde7..cff7c343ee08 100644 --- a/drivers/mailbox/arm_mhuv2.c +++ b/drivers/mailbox/arm_mhuv2.c @@ -500,7 +500,7 @@ static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = { static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg) { struct mbox_chan *chans = mhu->mbox.chans; - int channel = 0, i, offset = 0, windows, protocol, ch_wn; + int channel = 0, i, j, offset = 0, windows, protocol, ch_wn; u32 stat; for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) { @@ -510,9 +510,9 @@ static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg) ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat); - for (i = 0; i < mhu->length; i += 2) { - protocol = mhu->protocols[i]; - windows = mhu->protocols[i + 1]; + for (j = 0; j < mhu->length; j += 2) { + protocol = mhu->protocols[j]; + windows = mhu->protocols[j + 1]; if (ch_wn >= offset + windows) { if (protocol == DOORBELL) diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index 304729935ae6..dc076194bace 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c @@ -354,11 +354,12 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index) mutex_lock(&con_mutex); - if (of_parse_phandle_with_args(dev->of_node, "mboxes", - "#mbox-cells", index, &spec)) { + ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells", + index, &spec); + if (ret) { dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__); mutex_unlock(&con_mutex); - return ERR_PTR(-ENODEV); + return ERR_PTR(ret); } chan = ERR_PTR(-EPROBE_DEFER); diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c index 9465f9081515..3d369c23970c 100644 --- a/drivers/mailbox/mtk-cmdq-mailbox.c +++ b/drivers/mailbox/mtk-cmdq-mailbox.c @@ -605,18 +605,18 @@ static int cmdq_probe(struct platform_device *pdev) cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i]; } - err = 
devm_mbox_controller_register(dev, &cmdq->mbox); - if (err < 0) { - dev_err(dev, "failed to register mailbox: %d\n", err); - return err; - } - platform_set_drvdata(pdev, cmdq); WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks)); cmdq_init(cmdq); + err = devm_mbox_controller_register(dev, &cmdq->mbox); + if (err < 0) { + dev_err(dev, "failed to register mailbox: %d\n", err); + return err; + } + return 0; } diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c index 573481e436f5..e12cd1a62155 100644 --- a/drivers/mailbox/tegra-hsp.c +++ b/drivers/mailbox/tegra-hsp.c @@ -388,7 +388,6 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel) value = tegra_hsp_channel_readl(channel, HSP_SM_SHRD_MBOX); value &= ~HSP_SM_SHRD_MBOX_FULL; msg = (void *)(unsigned long)value; - mbox_chan_received_data(channel->chan, msg); /* * Need to clear all bits here since some producers, such as TCU, depend @@ -398,6 +397,8 @@ static void tegra_hsp_sm_recv32(struct tegra_hsp_channel *channel) * explicitly, so we have to make sure we cover all possible cases. */ tegra_hsp_channel_writel(channel, 0x0, HSP_SM_SHRD_MBOX); + + mbox_chan_received_data(channel->chan, msg); } static const struct tegra_hsp_sm_ops tegra_hsp_sm_32bit_ops = { @@ -433,7 +434,6 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel) value[3] = tegra_hsp_channel_readl(channel, HSP_SHRD_MBOX_TYPE1_DATA3); msg = (void *)(unsigned long)value; - mbox_chan_received_data(channel->chan, msg); /* * Clear data registers and tag. @@ -443,6 +443,8 @@ static void tegra_hsp_sm_recv128(struct tegra_hsp_channel *channel) tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA2); tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_DATA3); tegra_hsp_channel_writel(channel, 0x0, HSP_SHRD_MBOX_TYPE1_TAG); + + mbox_chan_received_data(channel->chan, msg); } static const struct tegra_hsp_sm_ops tegra_hsp_sm_128bit_ops = { diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c index 1ae37e693de0..d080e21df666 100644 --- a/drivers/mcb/mcb-parse.c +++ b/drivers/mcb/mcb-parse.c @@ -101,7 +101,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus, ret = mcb_device_register(bus, mdev); if (ret < 0) - goto err; + return ret; return 0; diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index d8d9394a6beb..18f21d4e9aaa 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -17,10 +17,16 @@ static inline void closure_put_after_sub(struct closure *cl, int flags) { int r = flags & CLOSURE_REMAINING_MASK; - BUG_ON(flags & CLOSURE_GUARD_MASK); - BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR)); + if (WARN(flags & CLOSURE_GUARD_MASK, + "closure has guard bits set: %x (%u)", + flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r))) + r &= ~CLOSURE_GUARD_MASK; if (!r) { + WARN(flags & ~CLOSURE_DESTRUCTOR, + "closure ref hit 0 with incorrect flags set: %x (%u)", + flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags)); + if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) { atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 659f6777b973..1ddae5c97239 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1726,7 +1726,7 @@ static void cache_set_flush(struct closure *cl) if (!IS_ERR_OR_NULL(c->gc_thread)) kthread_stop(c->gc_thread); - if (!IS_ERR(c->root)) + if (!IS_ERR_OR_NULL(c->root)) list_add(&c->root->list, &c->btree_cache); /* diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c 
index ec662f97ba82..496f9176113f 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1685,7 +1685,8 @@ static void __scan(struct dm_bufio_client *c) atomic_long_dec(&c->need_shrink); freed++; } - cond_resched(); + if (!(static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)) + cond_resched(); } } } diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index e714114d495a..66608b42ee1a 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2875,6 +2875,27 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache) return to_cblock(size); } +static bool can_resume(struct cache *cache) +{ + /* + * Disallow retrying the resume operation for devices that failed the + * first resume attempt, as the failure leaves the policy object partially + * initialized. Retrying could trigger BUG_ON when loading cache mappings + * into the incomplete policy object. + */ + if (cache->sized && !cache->loaded_mappings) { + if (get_cache_mode(cache) != CM_WRITE) + DMERR("%s: unable to resume a failed-loaded cache, please check metadata.", + cache_device_name(cache)); + else + DMERR("%s: unable to resume cache due to missing proper cache table reload", + cache_device_name(cache)); + return false; + } + + return true; +} + static bool can_resize(struct cache *cache, dm_cblock_t new_size) { if (from_cblock(new_size) > from_cblock(cache->cache_size)) { @@ -2923,6 +2944,9 @@ static int cache_preresume(struct dm_target *ti) struct cache *cache = ti->private; dm_cblock_t csize = get_cache_dev_size(cache); + if (!can_resume(cache)) + return -EINVAL; + /* * Check to see if the cache has resized. */ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 25e51dc6e559..57e662861c2e 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -56,6 +56,7 @@ struct convert_context { struct bio *bio_out; struct bvec_iter iter_out; atomic_t cc_pending; + unsigned int tag_offset; u64 cc_sector; union { struct skcipher_request *req; @@ -1223,6 +1224,7 @@ static void crypt_convert_init(struct crypt_config *cc, if (bio_out) ctx->iter_out = bio_out->bi_iter; ctx->cc_sector = sector + cc->iv_offset; + ctx->tag_offset = 0; init_completion(&ctx->restart); } @@ -1554,7 +1556,6 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_ static blk_status_t crypt_convert(struct crypt_config *cc, struct convert_context *ctx, bool atomic, bool reset_pending) { - unsigned int tag_offset = 0; unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT; int r; @@ -1577,9 +1578,9 @@ static blk_status_t crypt_convert(struct crypt_config *cc, atomic_inc(&ctx->cc_pending); if (crypt_integrity_aead(cc)) - r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset); + r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset); else - r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset); + r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset); switch (r) { /* @@ -1599,8 +1600,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc, * exit and continue processing in a workqueue */ ctx->r.req = NULL; + ctx->tag_offset++; ctx->cc_sector += sector_step; - tag_offset++; return BLK_STS_DEV_RESOURCE; } } else { @@ -1614,8 +1615,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc, */ case -EINPROGRESS: ctx->r.req = NULL; + ctx->tag_offset++; ctx->cc_sector += sector_step; - tag_offset++; continue; /* * The request was already processed (synchronously). 
@@ -1623,7 +1624,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc, case 0: atomic_dec(&ctx->cc_pending); ctx->cc_sector += sector_step; - tag_offset++; + ctx->tag_offset++; if (!atomic) cond_resched(); continue; @@ -2029,7 +2030,6 @@ static void kcryptd_crypt_write_continue(struct work_struct *work) struct crypt_config *cc = io->cc; struct convert_context *ctx = &io->ctx; int crypt_finished; - sector_t sector = io->sector; blk_status_t r; wait_for_completion(&ctx->restart); @@ -2046,10 +2046,8 @@ static void kcryptd_crypt_write_continue(struct work_struct *work) } /* Encryption was already finished, submit io now */ - if (crypt_finished) { + if (crypt_finished) kcryptd_crypt_write_io_submit(io, 0); - io->sector = sector; - } crypt_dec_pending(io); } @@ -2060,14 +2058,13 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) struct convert_context *ctx = &io->ctx; struct bio *clone; int crypt_finished; - sector_t sector = io->sector; blk_status_t r; /* * Prevent io from disappearing until this function completes. */ crypt_inc_pending(io); - crypt_convert_init(cc, ctx, NULL, io->base_bio, sector); + crypt_convert_init(cc, ctx, NULL, io->base_bio, io->sector); clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); if (unlikely(!clone)) { @@ -2084,8 +2081,6 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) io->ctx.iter_in = clone->bi_iter; } - sector += bio_sectors(clone); - crypt_inc_pending(io); r = crypt_convert(cc, ctx, test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true); @@ -2109,10 +2104,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) } /* Encryption was already finished, submit io now */ - if (crypt_finished) { + if (crypt_finished) kcryptd_crypt_write_io_submit(io, 0); - io->sector = sector; - } dec: crypt_dec_pending(io); diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c index 7606c6695a0e..9f0ce838d512 100644 --- a/drivers/md/dm-ebs-target.c +++ b/drivers/md/dm-ebs-target.c @@ -389,6 +389,12 @@ static int ebs_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_REMAPPED; } +static void ebs_postsuspend(struct dm_target *ti) +{ + struct ebs_c *ec = ti->private; + dm_bufio_client_reset(ec->bufio); +} + static void ebs_status(struct dm_target *ti, status_type_t type, unsigned int status_flags, char *result, unsigned int maxlen) { @@ -441,11 +447,12 @@ static int ebs_iterate_devices(struct dm_target *ti, static struct target_type ebs_target = { .name = "ebs", .version = {1, 0, 1}, - .features = DM_TARGET_PASSES_INTEGRITY, + .features = 0, .module = THIS_MODULE, .ctr = ebs_ctr, .dtr = ebs_dtr, .map = ebs_map, + .postsuspend = ebs_postsuspend, .status = ebs_status, .io_hints = ebs_io_hints, .prepare_ioctl = ebs_prepare_ioctl, diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index e0ffac93f900..efd0732a8c10 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -4546,16 +4546,19 @@ try_smaller_buffer: ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); if (!ic->recalc_bitmap) { + ti->error = "Could not allocate memory for bitmap"; r = -ENOMEM; goto bad; } ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); if (!ic->may_write_bitmap) { + ti->error = "Could not allocate memory for bitmap"; r = -ENOMEM; goto bad; } ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL); if (!ic->bbs) { + ti->error = "Could not allocate memory for bitmap"; r = -ENOMEM; goto bad; } @@ -4634,7 +4637,7 @@ 
static void dm_integrity_dtr(struct dm_target *ti) BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); BUG_ON(!list_empty(&ic->wait_list)); - if (ic->mode == 'B') + if (ic->mode == 'B' && ic->bitmap_flush_work.work.func) cancel_delayed_work_sync(&ic->bitmap_flush_work); if (ic->metadata_wq) destroy_workqueue(ic->metadata_wq); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index aabb2435070b..8b23b8bc5a03 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -502,8 +502,9 @@ static char **realloc_argv(unsigned int *size, char **old_argv) gfp = GFP_NOIO; } argv = kmalloc_array(new_size, sizeof(*argv), gfp); - if (argv && old_argv) { - memcpy(argv, old_argv, *size * sizeof(*argv)); + if (argv) { + if (old_argv) + memcpy(argv, old_argv, *size * sizeof(*argv)); *size = new_size; } @@ -670,6 +671,10 @@ int dm_table_add_target(struct dm_table *t, const char *type, DMERR("%s: zero-length target", dm_device_name(t->md)); return -EINVAL; } + if (start + len < start || start + len > LLONG_MAX >> SECTOR_SHIFT) { + DMERR("%s: too large device", dm_device_name(t->md)); + return -EINVAL; + } ti->type = dm_get_target_type(type); if (!ti->type) { @@ -1242,7 +1247,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile, t = dm_get_live_table(md, &srcu_idx); if (!t) - return 0; + goto put_live_table; for (unsigned int i = 0; i < t->num_targets; i++) { struct dm_target *ti = dm_table_get_target(t, i); @@ -1253,6 +1258,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile, (void *)key); } +put_live_table: dm_put_live_table(md, srcu_idx); return 0; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index f24d89af7c5f..aca3cfb46cf4 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2306,10 +2306,9 @@ static struct thin_c *get_first_thin(struct pool *pool) struct thin_c *tc = NULL; rcu_read_lock(); - if (!list_empty(&pool->active_thins)) { - tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); + tc = list_first_or_null_rcu(&pool->active_thins, struct thin_c, list); + if (tc) thin_get(tc); - } rcu_read_unlock(); return tc; @@ -2457,6 +2456,7 @@ static void pool_work_wait(struct pool_work *pw, struct pool *pool, init_completion(&pw->complete); queue_work(pool->wq, &pw->worker); wait_for_completion(&pw->complete); + destroy_work_on_stack(&pw->worker); } /*----------------------------------------------------------------*/ diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 0304e36af329..9bf48d34ef0f 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -60,14 +60,19 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio, * to the data block. Caller is responsible for releasing buf. */ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index, - unsigned int *offset, struct dm_buffer **buf) + unsigned int *offset, unsigned int par_buf_offset, + struct dm_buffer **buf) { u64 position, block, rem; u8 *res; + /* We have already part of parity bytes read, skip to the next block */ + if (par_buf_offset) + index++; + position = (index + rsb) * v->fec->roots; block = div64_u64_rem(position, v->fec->io_size, &rem); - *offset = (unsigned int)rem; + *offset = par_buf_offset ? 
0 : (unsigned int)rem; res = dm_bufio_read(v->fec->bufio, block, buf); if (IS_ERR(res)) { @@ -127,10 +132,11 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, { int r, corrected = 0, res; struct dm_buffer *buf; - unsigned int n, i, offset; - u8 *par, *block; + unsigned int n, i, offset, par_buf_offset = 0; + u8 *par, *block, par_buf[DM_VERITY_FEC_RSM - DM_VERITY_FEC_MIN_RSN]; - par = fec_read_parity(v, rsb, block_offset, &offset, &buf); + par = fec_read_parity(v, rsb, block_offset, &offset, + par_buf_offset, &buf); if (IS_ERR(par)) return PTR_ERR(par); @@ -140,7 +146,8 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, */ fec_for_each_buffer_rs_block(fio, n, i) { block = fec_buffer_rs_block(v, fio, n, i); - res = fec_decode_rs8(v, fio, block, &par[offset], neras); + memcpy(&par_buf[par_buf_offset], &par[offset], v->fec->roots - par_buf_offset); + res = fec_decode_rs8(v, fio, block, par_buf, neras); if (res < 0) { r = res; goto error; @@ -153,12 +160,21 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, if (block_offset >= 1 << v->data_dev_block_bits) goto done; - /* read the next block when we run out of parity bytes */ - offset += v->fec->roots; + /* Read the next block when we run out of parity bytes */ + offset += (v->fec->roots - par_buf_offset); + /* Check if parity bytes are split between blocks */ + if (offset < v->fec->io_size && (offset + v->fec->roots) > v->fec->io_size) { + par_buf_offset = v->fec->io_size - offset; + memcpy(par_buf, &par[offset], par_buf_offset); + offset += par_buf_offset; + } else + par_buf_offset = 0; + if (offset >= v->fec->io_size) { dm_bufio_release(buf); - par = fec_read_parity(v, rsb, block_offset, &offset, &buf); + par = fec_read_parity(v, rsb, block_offset, &offset, + par_buf_offset, &buf); if (IS_ERR(par)) return PTR_ERR(par); } @@ -743,10 +759,7 @@ int verity_fec_ctr(struct dm_verity *v) return -E2BIG; } - if ((f->roots << SECTOR_SHIFT) & ((1 << v->data_dev_block_bits) - 1)) - f->io_size = 1 << v->data_dev_block_bits; - else - f->io_size = v->fec->roots << SECTOR_SHIFT; + f->io_size = 1 << v->data_dev_block_bits; f->bufio = dm_bufio_client_create(f->dev->bdev, f->io_size, diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 52585e2c61aa..a3c1682a2298 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -823,6 +823,13 @@ static int verity_map(struct dm_target *ti, struct bio *bio) return DM_MAPIO_SUBMITTED; } +static void verity_postsuspend(struct dm_target *ti) +{ + struct dm_verity *v = ti->private; + flush_workqueue(v->verify_wq); + dm_bufio_client_reset(v->bufio); +} + /* * Status: V (valid) or C (corruption found) */ @@ -1542,6 +1549,7 @@ static struct target_type verity_target = { .ctr = verity_ctr, .dtr = verity_dtr, .map = verity_map, + .postsuspend = verity_postsuspend, .status = verity_status, .prepare_ioctl = verity_prepare_ioctl, .iterate_devices = verity_iterate_devices, diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f70129bc703b..4767265793de 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1523,14 +1523,18 @@ static void __send_empty_flush(struct clone_info *ci) { struct dm_table *t = ci->map; struct bio flush_bio; + blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; + + if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) == + (REQ_IDLE | REQ_SYNC)) + opf |= REQ_IDLE; /* * Use an on-stack bio for this, it's safe since we don't * need to reference it after submit. 
It's just used as * the basis for the clone(s). */ - bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, - REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC); + bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf); ci->bio = &flush_bio; ci->sector_count = 0; diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index 9d8ac04c2346..02629516748e 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -2022,33 +2022,28 @@ int md_bitmap_copy_from_slot(struct mddev *mddev, int slot, } EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot); - -void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap) +int md_bitmap_get_stats(struct bitmap *bitmap, struct md_bitmap_stats *stats) { - unsigned long chunk_kb; struct bitmap_counts *counts; + bitmap_super_t *sb; if (!bitmap) - return; + return -ENOENT; + if (!bitmap->mddev->bitmap_info.external && + !bitmap->storage.sb_page) + return -EINVAL; + sb = kmap_local_page(bitmap->storage.sb_page); + stats->sync_size = le64_to_cpu(sb->sync_size); + kunmap_local(sb); counts = &bitmap->counts; + stats->missing_pages = counts->missing_pages; + stats->pages = counts->pages; + stats->file = bitmap->storage.file; - chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10; - seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " - "%lu%s chunk", - counts->pages - counts->missing_pages, - counts->pages, - (counts->pages - counts->missing_pages) - << (PAGE_SHIFT - 10), - chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize, - chunk_kb ? "KB" : "B"); - if (bitmap->storage.file) { - seq_printf(seq, ", file: "); - seq_file_path(seq, bitmap->storage.file, " \t\n"); - } - - seq_printf(seq, "\n"); + return 0; } +EXPORT_SYMBOL_GPL(md_bitmap_get_stats); int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks, int chunksize, int init) diff --git a/drivers/md/md-bitmap.h b/drivers/md/md-bitmap.h index 3a4750952b3a..7b7a701f74be 100644 --- a/drivers/md/md-bitmap.h +++ b/drivers/md/md-bitmap.h @@ -233,6 +233,13 @@ struct bitmap { int cluster_slot; /* Slot offset for clustered env */ }; +struct md_bitmap_stats { + unsigned long missing_pages; + unsigned long sync_size; + unsigned long pages; + struct file *file; +}; + /* the bitmap API */ /* these are used only by md/bitmap */ @@ -243,7 +250,7 @@ void md_bitmap_destroy(struct mddev *mddev); void md_bitmap_print_sb(struct bitmap *bitmap); void md_bitmap_update_sb(struct bitmap *bitmap); -void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap); +int md_bitmap_get_stats(struct bitmap *bitmap, struct md_bitmap_stats *stats); int md_bitmap_setallbits(struct bitmap *bitmap); void md_bitmap_write_all(struct bitmap *bitmap); diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 10e0c5381d01..7484bb83171a 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -1185,18 +1185,21 @@ out: */ static int cluster_check_sync_size(struct mddev *mddev) { - int i, rv; - bitmap_super_t *sb; - unsigned long my_sync_size, sync_size = 0; - int node_num = mddev->bitmap_info.nodes; int current_slot = md_cluster_ops->slot_number(mddev); + int node_num = mddev->bitmap_info.nodes; struct bitmap *bitmap = mddev->bitmap; - char str[64]; struct dlm_lock_resource *bm_lockres; + struct md_bitmap_stats stats; + unsigned long sync_size = 0; + unsigned long my_sync_size; + char str[64]; + int i, rv; - sb = kmap_atomic(bitmap->storage.sb_page); - my_sync_size = sb->sync_size; - kunmap_atomic(sb); + rv = md_bitmap_get_stats(bitmap, &stats); + if (rv) + return rv; + + my_sync_size = stats.sync_size; for (i = 0; 
i < node_num; i++) { if (i == current_slot) @@ -1225,15 +1228,18 @@ static int cluster_check_sync_size(struct mddev *mddev) md_bitmap_update_sb(bitmap); lockres_free(bm_lockres); - sb = kmap_atomic(bitmap->storage.sb_page); - if (sync_size == 0) - sync_size = sb->sync_size; - else if (sync_size != sb->sync_size) { - kunmap_atomic(sb); + rv = md_bitmap_get_stats(bitmap, &stats); + if (rv) { + md_bitmap_free(bitmap); + return rv; + } + + if (sync_size == 0) { + sync_size = stats.sync_size; + } else if (sync_size != stats.sync_size) { md_bitmap_free(bitmap); return -1; } - kunmap_atomic(sb); md_bitmap_free(bitmap); } diff --git a/drivers/md/md.c b/drivers/md/md.c index 297c86f5c70b..a9fcfcbc2d11 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -667,24 +667,34 @@ static inline struct mddev *mddev_get(struct mddev *mddev) static void mddev_delayed_delete(struct work_struct *ws); +static void __mddev_put(struct mddev *mddev) +{ + if (mddev->raid_disks || !list_empty(&mddev->disks) || + mddev->ctime || mddev->hold_active) + return; + + /* Array is not configured at all, and not held active, so destroy it */ + set_bit(MD_DELETED, &mddev->flags); + + /* + * Call queue_work inside the spinlock so that flush_workqueue() after + * mddev_find will succeed in waiting for the work to be done. + */ + INIT_WORK(&mddev->del_work, mddev_delayed_delete); + queue_work(md_misc_wq, &mddev->del_work); +} + +static void mddev_put_locked(struct mddev *mddev) +{ + if (atomic_dec_and_test(&mddev->active)) + __mddev_put(mddev); +} + void mddev_put(struct mddev *mddev) { if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) return; - if (!mddev->raid_disks && list_empty(&mddev->disks) && - mddev->ctime == 0 && !mddev->hold_active) { - /* Array is not configured at all, and not held active, - * so destroy it */ - set_bit(MD_DELETED, &mddev->flags); - - /* - * Call queue_work inside the spinlock so that - * flush_workqueue() after mddev_find will succeed in waiting - * for the work to be done. 
- */ - INIT_WORK(&mddev->del_work, mddev_delayed_delete); - queue_work(md_misc_wq, &mddev->del_work); - } + __mddev_put(mddev); spin_unlock(&all_mddevs_lock); } @@ -5955,6 +5965,13 @@ int md_run(struct mddev *mddev) goto exit_bio_set; } + if (!bioset_initialized(&mddev->io_acct_set)) { + err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE, + offsetof(struct md_io_acct, bio_clone), 0); + if (err) + goto exit_sync_set; + } + spin_lock(&pers_lock); pers = find_pers(mddev->level, mddev->clevel); if (!pers || !try_module_get(pers->owner)) { @@ -6132,6 +6149,8 @@ bitmap_abort: module_put(pers->owner); md_bitmap_destroy(mddev); abort: + bioset_exit(&mddev->io_acct_set); +exit_sync_set: bioset_exit(&mddev->sync_set); exit_bio_set: bioset_exit(&mddev->bio_set); @@ -6364,6 +6383,7 @@ static void __md_stop(struct mddev *mddev) percpu_ref_exit(&mddev->active_io); bioset_exit(&mddev->bio_set); bioset_exit(&mddev->sync_set); + bioset_exit(&mddev->io_acct_set); } void md_stop(struct mddev *mddev) @@ -8318,6 +8338,33 @@ static void md_seq_stop(struct seq_file *seq, void *v) mddev_put(mddev); } +static void md_bitmap_status(struct seq_file *seq, struct mddev *mddev) +{ + struct md_bitmap_stats stats; + unsigned long used_pages; + unsigned long chunk_kb; + int err; + + err = md_bitmap_get_stats(mddev->bitmap, &stats); + if (err) + return; + + chunk_kb = mddev->bitmap_info.chunksize >> 10; + used_pages = stats.pages - stats.missing_pages; + + seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], %lu%s chunk", + used_pages, stats.pages, used_pages << (PAGE_SHIFT - 10), + chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize, + chunk_kb ? "KB" : "B"); + + if (stats.file) { + seq_puts(seq, ", file: "); + seq_file_path(seq, stats.file, " \t\n"); + } + + seq_putc(seq, '\n'); +} + static int md_seq_show(struct seq_file *seq, void *v) { struct mddev *mddev = v; @@ -8341,6 +8388,9 @@ static int md_seq_show(struct seq_file *seq, void *v) return 0; } + /* prevent bitmap to be freed after checking */ + mutex_lock(&mddev->bitmap_info.mutex); + spin_lock(&mddev->lock); if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { seq_printf(seq, "%s : %sactive", mdname(mddev), @@ -8406,11 +8456,12 @@ static int md_seq_show(struct seq_file *seq, void *v) } else seq_printf(seq, "\n "); - md_bitmap_status(seq, mddev->bitmap); + md_bitmap_status(seq, mddev); seq_printf(seq, "\n"); } spin_unlock(&mddev->lock); + mutex_unlock(&mddev->bitmap_info.mutex); return 0; } @@ -8703,23 +8754,6 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, } EXPORT_SYMBOL_GPL(md_submit_discard_bio); -int acct_bioset_init(struct mddev *mddev) -{ - int err = 0; - - if (!bioset_initialized(&mddev->io_acct_set)) - err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE, - offsetof(struct md_io_acct, bio_clone), 0); - return err; -} -EXPORT_SYMBOL_GPL(acct_bioset_init); - -void acct_bioset_exit(struct mddev *mddev) -{ - bioset_exit(&mddev->io_acct_set); -} -EXPORT_SYMBOL_GPL(acct_bioset_exit); - static void md_end_io_acct(struct bio *bio) { struct md_io_acct *md_io_acct = bio->bi_private; @@ -9669,11 +9703,11 @@ EXPORT_SYMBOL_GPL(rdev_clear_badblocks); static int md_notify_reboot(struct notifier_block *this, unsigned long code, void *x) { - struct mddev *mddev, *n; + struct mddev *mddev; int need_delay = 0; spin_lock(&all_mddevs_lock); - list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { + list_for_each_entry(mddev, &all_mddevs, all_mddevs) { if (!mddev_get(mddev)) continue; spin_unlock(&all_mddevs_lock); @@ -9685,8 +9719,8 
@@ static int md_notify_reboot(struct notifier_block *this, mddev_unlock(mddev); } need_delay = 1; - mddev_put(mddev); spin_lock(&all_mddevs_lock); + mddev_put_locked(mddev); } spin_unlock(&all_mddevs_lock); @@ -10008,7 +10042,7 @@ void md_autostart_arrays(int part) static __exit void md_exit(void) { - struct mddev *mddev, *n; + struct mddev *mddev; int delay = 1; unregister_blkdev(MD_MAJOR,"md"); @@ -10029,7 +10063,7 @@ static __exit void md_exit(void) remove_proc_entry("mdstat", NULL); spin_lock(&all_mddevs_lock); - list_for_each_entry_safe(mddev, n, &all_mddevs, all_mddevs) { + list_for_each_entry(mddev, &all_mddevs, all_mddevs) { if (!mddev_get(mddev)) continue; spin_unlock(&all_mddevs_lock); @@ -10041,8 +10075,8 @@ static __exit void md_exit(void) * the mddev for destruction by a workqueue, and the * destroy_workqueue() below will wait for that to complete. */ - mddev_put(mddev); spin_lock(&all_mddevs_lock); + mddev_put_locked(mddev); } spin_unlock(&all_mddevs_lock); diff --git a/drivers/md/md.h b/drivers/md/md.h index 4f0b48097455..1fda5e139beb 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -746,8 +746,6 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev); extern void md_finish_reshape(struct mddev *mddev); void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, struct bio *bio, sector_t start, sector_t size); -int acct_bioset_init(struct mddev *mddev); -void acct_bioset_exit(struct mddev *mddev); void md_account_bio(struct mddev *mddev, struct bio **bio); extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio); diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c index eff9b41869f2..767ab60507d9 100644 --- a/drivers/md/persistent-data/dm-array.c +++ b/drivers/md/persistent-data/dm-array.c @@ -912,23 +912,27 @@ static int load_ablock(struct dm_array_cursor *c) if (c->block) unlock_ablock(c->info, c->block); - c->block = NULL; - c->ab = NULL; c->index = 0; r = dm_btree_cursor_get_value(&c->cursor, &key, &value_le); if (r) { DMERR("dm_btree_cursor_get_value failed"); - dm_btree_cursor_end(&c->cursor); + goto out; } else { r = get_ablock(c->info, le64_to_cpu(value_le), &c->block, &c->ab); if (r) { DMERR("get_ablock failed"); - dm_btree_cursor_end(&c->cursor); + goto out; } } + return 0; + +out: + dm_btree_cursor_end(&c->cursor); + c->block = NULL; + c->ab = NULL; return r; } @@ -951,10 +955,10 @@ EXPORT_SYMBOL_GPL(dm_array_cursor_begin); void dm_array_cursor_end(struct dm_array_cursor *c) { - if (c->block) { + if (c->block) unlock_ablock(c->info, c->block); - dm_btree_cursor_end(&c->cursor); - } + + dm_btree_cursor_end(&c->cursor); } EXPORT_SYMBOL_GPL(dm_array_cursor_end); @@ -994,6 +998,7 @@ int dm_array_cursor_skip(struct dm_array_cursor *c, uint32_t count) } count -= remaining; + c->index += (remaining - 1); r = dm_array_cursor_next(c); } while (!r); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 7c6a0b4437d8..c50a7abda744 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -377,7 +377,6 @@ static void raid0_free(struct mddev *mddev, void *priv) struct r0conf *conf = priv; free_conf(mddev, conf); - acct_bioset_exit(mddev); } static int raid0_run(struct mddev *mddev) @@ -392,16 +391,11 @@ static int raid0_run(struct mddev *mddev) if (md_check_no_bitmap(mddev)) return -EINVAL; - if (acct_bioset_init(mddev)) { - pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev)); - return -ENOMEM; - } - /* if private is not null, we are here after takeover */ if 
(mddev->private == NULL) { ret = create_strip_zones(mddev, &conf); if (ret < 0) - goto exit_acct_set; + return ret; mddev->private = conf; } conf = mddev->private; @@ -432,15 +426,9 @@ static int raid0_run(struct mddev *mddev) ret = md_integrity_register(mddev); if (ret) - goto free; + free_conf(mddev, conf); return ret; - -free: - free_conf(mddev, conf); -exit_acct_set: - acct_bioset_exit(mddev); - return ret; } /* diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 76f7ca53d812..38e77a4b6b33 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -2068,14 +2068,9 @@ static int fix_sync_read_error(struct r1bio *r1_bio) if (!rdev_set_badblocks(rdev, sect, s, 0)) abort = 1; } - if (abort) { - conf->recovery_disabled = - mddev->recovery_disabled; - set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_done_sync(mddev, r1_bio->sectors, 0); - put_buf(r1_bio); + if (abort) return 0; - } + /* Try next page */ sectors -= s; sect += s; @@ -2214,10 +2209,21 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) int disks = conf->raid_disks * 2; struct bio *wbio; - if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) - /* ouch - failed to read all of that. */ - if (!fix_sync_read_error(r1_bio)) + if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) { + /* + * ouch - failed to read all of that. + * No need to fix read error for check/repair + * because all member disks are read. + */ + if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) || + !fix_sync_read_error(r1_bio)) { + conf->recovery_disabled = mddev->recovery_disabled; + set_bit(MD_RECOVERY_INTR, &mddev->recovery); + md_done_sync(mddev, r1_bio->sectors, 0); + put_buf(r1_bio); return; + } + } if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) process_checks(r1_bio); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index f608f30c7d6a..24427eddf61b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1772,6 +1772,7 @@ retry_discard: * The discard bio returns only first r10bio finishes */ if (first_copy) { + md_account_bio(mddev, &bio); r10_bio->master_bio = bio; set_bit(R10BIO_Discard, &r10_bio->state); first_copy = false; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4315dabd3202..6e80a439ec45 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7770,19 +7770,12 @@ static int raid5_run(struct mddev *mddev) struct md_rdev *rdev; struct md_rdev *journal_dev = NULL; sector_t reshape_offset = 0; - int i, ret = 0; + int i; long long min_offset_diff = 0; int first = 1; - if (acct_bioset_init(mddev)) { - pr_err("md/raid456:%s: alloc acct bioset failed.\n", mdname(mddev)); + if (mddev_init_writes_pending(mddev) < 0) return -ENOMEM; - } - - if (mddev_init_writes_pending(mddev) < 0) { - ret = -ENOMEM; - goto exit_acct_set; - } if (mddev->recovery_cp != MaxSector) pr_notice("md/raid:%s: not clean -- starting background reconstruction\n", @@ -7813,8 +7806,7 @@ static int raid5_run(struct mddev *mddev) (mddev->bitmap_info.offset || mddev->bitmap_info.file)) { pr_notice("md/raid:%s: array cannot have both journal and bitmap\n", mdname(mddev)); - ret = -EINVAL; - goto exit_acct_set; + return -EINVAL; } if (mddev->reshape_position != MaxSector) { @@ -7839,15 +7831,13 @@ static int raid5_run(struct mddev *mddev) if (journal_dev) { pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n", mdname(mddev)); - ret = -EINVAL; - goto exit_acct_set; + return -EINVAL; } if (mddev->new_level != mddev->level) { pr_warn("md/raid:%s: unsupported reshape required - aborting.\n", mdname(mddev)); - ret = 
-EINVAL; - goto exit_acct_set; + return -EINVAL; } old_disks = mddev->raid_disks - mddev->delta_disks; /* reshape_position must be on a new-stripe boundary, and one @@ -7863,8 +7853,7 @@ static int raid5_run(struct mddev *mddev) if (sector_div(here_new, chunk_sectors * new_data_disks)) { pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n", mdname(mddev)); - ret = -EINVAL; - goto exit_acct_set; + return -EINVAL; } reshape_offset = here_new * chunk_sectors; /* here_new is the stripe we will write to */ @@ -7886,8 +7875,7 @@ static int raid5_run(struct mddev *mddev) else if (mddev->ro == 0) { pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n", mdname(mddev)); - ret = -EINVAL; - goto exit_acct_set; + return -EINVAL; } } else if (mddev->reshape_backwards ? (here_new * chunk_sectors + min_offset_diff <= @@ -7897,8 +7885,7 @@ static int raid5_run(struct mddev *mddev) /* Reading from the same stripe as writing to - bad */ pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n", mdname(mddev)); - ret = -EINVAL; - goto exit_acct_set; + return -EINVAL; } pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev)); /* OK, we should be able to continue; */ @@ -7922,10 +7909,8 @@ static int raid5_run(struct mddev *mddev) else conf = mddev->private; - if (IS_ERR(conf)) { - ret = PTR_ERR(conf); - goto exit_acct_set; - } + if (IS_ERR(conf)) + return PTR_ERR(conf); if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { if (!journal_dev) { @@ -8125,10 +8110,7 @@ abort: free_conf(conf); mddev->private = NULL; pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev)); - ret = -EIO; -exit_acct_set: - acct_bioset_exit(mddev); - return ret; + return -EIO; } static void raid5_free(struct mddev *mddev, void *priv) @@ -8136,7 +8118,6 @@ static void raid5_free(struct mddev *mddev, void *priv) struct r5conf *conf = priv; free_conf(conf); - acct_bioset_exit(mddev); mddev->to_remove = &raid5_attrs_group; } diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c index 40db7911b437..7b2db46a5722 100644 --- a/drivers/media/cec/platform/stm32/stm32-cec.c +++ b/drivers/media/cec/platform/stm32/stm32-cec.c @@ -288,12 +288,9 @@ static int stm32_cec_probe(struct platform_device *pdev) return ret; cec->clk_cec = devm_clk_get(&pdev->dev, "cec"); - if (IS_ERR(cec->clk_cec)) { - if (PTR_ERR(cec->clk_cec) != -EPROBE_DEFER) - dev_err(&pdev->dev, "Cannot get cec clock\n"); - - return PTR_ERR(cec->clk_cec); - } + if (IS_ERR(cec->clk_cec)) + return dev_err_probe(&pdev->dev, PTR_ERR(cec->clk_cec), + "Cannot get cec clock\n"); ret = clk_prepare(cec->clk_cec); if (ret) { diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c index f80caaa333da..67c47e3d671d 100644 --- a/drivers/media/common/siano/smsdvb-main.c +++ b/drivers/media/common/siano/smsdvb-main.c @@ -1243,6 +1243,8 @@ static int __init smsdvb_module_init(void) smsdvb_debugfs_register(); rc = smscore_register_hotplug(smsdvb_hotplug); + if (rc) + smsdvb_debugfs_unregister(); pr_debug("\n"); diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c index d1212acb7093..fc9a9ef93cae 100644 --- a/drivers/media/dvb-core/dvbdev.c +++ b/drivers/media/dvb-core/dvbdev.c @@ -534,6 +534,9 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, for (minor = 0; minor < MAX_DVB_MINORS; minor++) if (dvb_minors[minor] == NULL) break; +#else + minor = nums2minor(adap->num, type, id); 
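A minimal sketch of the minor-number selection after the dvbdev.c hunk above: both the dynamic and the static configuration now funnel into one shared bounds check. The helper name below is hypothetical and the real logic lives inside dvb_register_device(), which also unwinds the partially registered device on failure; only the names visible in the hunk (dvb_minors, nums2minor, MAX_DVB_MINORS, CONFIG_DVB_DYNAMIC_MINORS) are taken from it.

static int dvb_pick_minor_sketch(struct dvb_adapter *adap, int type, int id)
{
	int minor;

#ifdef CONFIG_DVB_DYNAMIC_MINORS
	/* dynamic minors: scan for the first free slot */
	for (minor = 0; minor < MAX_DVB_MINORS; minor++)
		if (dvb_minors[minor] == NULL)
			break;
#else
	/* static minors: derive the number from adapter, type and id */
	minor = nums2minor(adap->num, type, id);
#endif
	/* both configurations now hit the same error path */
	if (minor >= MAX_DVB_MINORS)
		return -EINVAL;

	return minor;
}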
+#endif if (minor >= MAX_DVB_MINORS) { if (new_node) { list_del (&new_node->list_head); @@ -547,17 +550,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev, mutex_unlock(&dvbdev_register_lock); return -EINVAL; } -#else - minor = nums2minor(adap->num, type, id); - if (minor >= MAX_DVB_MINORS) { - dvb_media_device_free(dvbdev); - list_del(&dvbdev->list_head); - kfree(dvbdev); - *pdvbdev = NULL; - mutex_unlock(&dvbdev_register_lock); - return ret; - } -#endif + dvbdev->minor = minor; dvb_minors[minor] = dvb_device_get(dvbdev); up_write(&minor_rwsem); diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c index e9d1eef40c62..798da5042136 100644 --- a/drivers/media/dvb-frontends/cxd2841er.c +++ b/drivers/media/dvb-frontends/cxd2841er.c @@ -311,12 +311,8 @@ static int cxd2841er_set_reg_bits(struct cxd2841er_priv *priv, static u32 cxd2841er_calc_iffreq_xtal(enum cxd2841er_xtal xtal, u32 ifhz) { - u64 tmp; - - tmp = (u64) ifhz * 16777216; - do_div(tmp, ((xtal == SONY_XTAL_24000) ? 48000000 : 41000000)); - - return (u32) tmp; + return div_u64(ifhz * 16777216ull, + (xtal == SONY_XTAL_24000) ? 48000000 : 41000000); } static u32 cxd2841er_calc_iffreq(u32 ifhz) diff --git a/drivers/media/dvb-frontends/dib3000mb.c b/drivers/media/dvb-frontends/dib3000mb.c index c598b2a63325..7c452ddd9e40 100644 --- a/drivers/media/dvb-frontends/dib3000mb.c +++ b/drivers/media/dvb-frontends/dib3000mb.c @@ -51,7 +51,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info,2=xfer,4=setfe,8=getfe (|-a static int dib3000_read_reg(struct dib3000_state *state, u16 reg) { u8 wb[] = { ((reg >> 8) | 0x80) & 0xff, reg & 0xff }; - u8 rb[2]; + u8 rb[2] = {}; struct i2c_msg msg[] = { { .addr = state->config.demod_address, .flags = 0, .buf = wb, .len = 2 }, { .addr = state->config.demod_address, .flags = I2C_M_RD, .buf = rb, .len = 2 }, diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c index 301d8eca7a6f..f2fae575e84c 100644 --- a/drivers/media/dvb-frontends/dib8000.c +++ b/drivers/media/dvb-frontends/dib8000.c @@ -2701,8 +2701,11 @@ static void dib8000_set_dds(struct dib8000_state *state, s32 offset_khz) u8 ratio; if (state->revision == 0x8090) { + u32 internal = dib8000_read32(state, 23) / 1000; + ratio = 4; - unit_khz_dds_val = (1<<26) / (dib8000_read32(state, 23) / 1000); + + unit_khz_dds_val = (1<<26) / (internal ?: 1); if (offset_khz < 0) dds = (1 << 26) - (abs_offset_khz * unit_khz_dds_val); else diff --git a/drivers/media/dvb-frontends/ts2020.c b/drivers/media/dvb-frontends/ts2020.c index 2f64f1a8bc23..28de1c7de38f 100644 --- a/drivers/media/dvb-frontends/ts2020.c +++ b/drivers/media/dvb-frontends/ts2020.c @@ -554,13 +554,19 @@ static int ts2020_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ts2020_config *pdata = client->dev.platform_data; - struct dvb_frontend *fe = pdata->fe; + struct dvb_frontend *fe; struct ts2020_priv *dev; int ret; u8 u8tmp; unsigned int utmp; char *chip_str; + if (!pdata) { + dev_err(&client->dev, "platform data is mandatory\n"); + return -EINVAL; + } + + fe = pdata->fe; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; diff --git a/drivers/media/i2c/ad5820.c b/drivers/media/i2c/ad5820.c index 088c29c4e252..56d22d02a0d9 100644 --- a/drivers/media/i2c/ad5820.c +++ b/drivers/media/i2c/ad5820.c @@ -301,21 +301,15 @@ static int ad5820_probe(struct i2c_client *client, return -ENOMEM; coil->vana = devm_regulator_get(&client->dev, "VANA"); - if 
(IS_ERR(coil->vana)) { - ret = PTR_ERR(coil->vana); - if (ret != -EPROBE_DEFER) - dev_err(&client->dev, "could not get regulator for vana\n"); - return ret; - } + if (IS_ERR(coil->vana)) + return dev_err_probe(&client->dev, PTR_ERR(coil->vana), + "could not get regulator for vana\n"); coil->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR(coil->enable_gpio)) { - ret = PTR_ERR(coil->enable_gpio); - if (ret != -EPROBE_DEFER) - dev_err(&client->dev, "could not get enable gpio\n"); - return ret; - } + if (IS_ERR(coil->enable_gpio)) + return dev_err_probe(&client->dev, PTR_ERR(coil->enable_gpio), + "could not get enable gpio\n"); mutex_init(&coil->power_lock); diff --git a/drivers/media/i2c/adv7180.c b/drivers/media/i2c/adv7180.c index 216fe396973f..46912a7b671a 100644 --- a/drivers/media/i2c/adv7180.c +++ b/drivers/media/i2c/adv7180.c @@ -194,6 +194,7 @@ struct adv7180_state; #define ADV7180_FLAG_V2 BIT(1) #define ADV7180_FLAG_MIPI_CSI2 BIT(2) #define ADV7180_FLAG_I2P BIT(3) +#define ADV7180_FLAG_TEST_PATTERN BIT(4) struct adv7180_chip_info { unsigned int flags; @@ -673,11 +674,15 @@ static int adv7180_init_controls(struct adv7180_state *state) ADV7180_HUE_MAX, 1, ADV7180_HUE_DEF); v4l2_ctrl_new_custom(&state->ctrl_hdl, &adv7180_ctrl_fast_switch, NULL); - v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl, &adv7180_ctrl_ops, - V4L2_CID_TEST_PATTERN, - ARRAY_SIZE(test_pattern_menu) - 1, - 0, ARRAY_SIZE(test_pattern_menu) - 1, - test_pattern_menu); + if (state->chip_info->flags & ADV7180_FLAG_TEST_PATTERN) { + v4l2_ctrl_new_std_menu_items(&state->ctrl_hdl, + &adv7180_ctrl_ops, + V4L2_CID_TEST_PATTERN, + ARRAY_SIZE(test_pattern_menu) - 1, + 0, + ARRAY_SIZE(test_pattern_menu) - 1, + test_pattern_menu); + } state->sd.ctrl_handler = &state->ctrl_hdl; if (state->ctrl_hdl.error) { @@ -1209,7 +1214,7 @@ static const struct adv7180_chip_info adv7182_info = { }; static const struct adv7180_chip_info adv7280_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN3) | @@ -1223,7 +1228,8 @@ static const struct adv7180_chip_info adv7280_info = { }; static const struct adv7180_chip_info adv7280_m_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P | + ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN3) | @@ -1244,7 +1250,8 @@ static const struct adv7180_chip_info adv7280_m_info = { }; static const struct adv7180_chip_info adv7281_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | + ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN7) | @@ -1259,7 +1266,8 @@ static const struct adv7180_chip_info adv7281_info = { }; static const struct adv7180_chip_info adv7281_m_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | + ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN3) | @@ -1279,7 +1287,8 @@ static const struct adv7180_chip_info adv7281_m_info = { }; static const struct adv7180_chip_info adv7281_ma_info = { - .flags = 
ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | + ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN3) | @@ -1304,7 +1313,7 @@ static const struct adv7180_chip_info adv7281_ma_info = { }; static const struct adv7180_chip_info adv7282_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_I2P | ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN7) | @@ -1319,7 +1328,8 @@ static const struct adv7180_chip_info adv7282_info = { }; static const struct adv7180_chip_info adv7282_m_info = { - .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P, + .flags = ADV7180_FLAG_V2 | ADV7180_FLAG_MIPI_CSI2 | ADV7180_FLAG_I2P | + ADV7180_FLAG_TEST_PATTERN, .valid_input_mask = BIT(ADV7182_INPUT_CVBS_AIN1) | BIT(ADV7182_INPUT_CVBS_AIN2) | BIT(ADV7182_INPUT_CVBS_AIN3) | diff --git a/drivers/media/i2c/adv748x/adv748x.h b/drivers/media/i2c/adv748x/adv748x.h index d75eb3d8be5a..2e827bec5823 100644 --- a/drivers/media/i2c/adv748x/adv748x.h +++ b/drivers/media/i2c/adv748x/adv748x.h @@ -322,7 +322,7 @@ struct adv748x_state { /* Free run pattern select */ #define ADV748X_SDP_FRP 0x14 -#define ADV748X_SDP_FRP_MASK GENMASK(3, 1) +#define ADV748X_SDP_FRP_MASK GENMASK(2, 0) /* Saturation */ #define ADV748X_SDP_SD_SAT_U 0xe3 /* user_map_rw_reg_e3 */ diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c index 96c2fa2c3b7a..d76ba25e0d1b 100644 --- a/drivers/media/i2c/adv7604.c +++ b/drivers/media/i2c/adv7604.c @@ -1405,12 +1405,13 @@ static int stdi2dv_timings(struct v4l2_subdev *sd, if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs, 0, (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) | (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0), - false, timings)) + false, adv76xx_get_dv_timings_cap(sd, -1), timings)) return 0; if (v4l2_detect_gtf(stdi->lcf + 1, hfreq, stdi->lcvs, (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) | (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0), - false, state->aspect_ratio, timings)) + false, state->aspect_ratio, + adv76xx_get_dv_timings_cap(sd, -1), timings)) return 0; v4l2_dbg(2, debug, sd, diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c index 7731cc1887e6..50245c2e0782 100644 --- a/drivers/media/i2c/adv7842.c +++ b/drivers/media/i2c/adv7842.c @@ -1431,14 +1431,15 @@ static int stdi2dv_timings(struct v4l2_subdev *sd, } if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs, 0, - (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) | - (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0), - false, timings)) + (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) | + (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0), + false, adv7842_get_dv_timings_cap(sd), timings)) return 0; if (v4l2_detect_gtf(stdi->lcf + 1, hfreq, stdi->lcvs, - (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) | - (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0), - false, state->aspect_ratio, timings)) + (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) | + (stdi->vs_pol == '+' ? 
V4L2_DV_VSYNC_POS_POL : 0), + false, state->aspect_ratio, + adv7842_get_dv_timings_cap(sd), timings)) return 0; v4l2_dbg(2, debug, sd, diff --git a/drivers/media/i2c/ccs/ccs-core.c b/drivers/media/i2c/ccs/ccs-core.c index 5fdb922d24e0..0579fae3d014 100644 --- a/drivers/media/i2c/ccs/ccs-core.c +++ b/drivers/media/i2c/ccs/ccs-core.c @@ -3642,6 +3642,7 @@ static int ccs_probe(struct i2c_client *client) out_disable_runtime_pm: pm_runtime_put_noidle(&client->dev); pm_runtime_disable(&client->dev); + pm_runtime_set_suspended(&client->dev); out_media_entity_cleanup: media_entity_cleanup(&sensor->src->sd.entity); @@ -3649,15 +3650,15 @@ out_media_entity_cleanup: out_cleanup: ccs_cleanup(sensor); +out_free_ccs_limits: + kfree(sensor->ccs_limits); + out_release_mdata: kvfree(sensor->mdata.backing); out_release_sdata: kvfree(sensor->sdata.backing); -out_free_ccs_limits: - kfree(sensor->ccs_limits); - out_power_off: ccs_power_off(&client->dev); mutex_destroy(&sensor->mutex); @@ -3674,9 +3675,10 @@ static void ccs_remove(struct i2c_client *client) v4l2_async_unregister_subdev(subdev); pm_runtime_disable(&client->dev); - if (!pm_runtime_status_suspended(&client->dev)) + if (!pm_runtime_status_suspended(&client->dev)) { ccs_power_off(&client->dev); - pm_runtime_set_suspended(&client->dev); + pm_runtime_set_suspended(&client->dev); + } for (i = 0; i < sensor->ssds_used; i++) { v4l2_device_unregister_subdev(&sensor->ssds[i].sd); diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c index 08400edf77ce..2591dba51e17 100644 --- a/drivers/media/i2c/ccs/ccs-data.c +++ b/drivers/media/i2c/ccs/ccs-data.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "ccs-data-defs.h" @@ -97,7 +98,7 @@ ccs_data_parse_length_specifier(const struct __ccs_data_length_specifier *__len, plen = ((size_t) (__len3->length[0] & ((1 << CCS_DATA_LENGTH_SPECIFIER_SIZE_SHIFT) - 1)) - << 16) + (__len3->length[0] << 8) + __len3->length[1]; + << 16) + (__len3->length[1] << 8) + __len3->length[2]; break; } default: @@ -948,15 +949,15 @@ int ccs_data_parse(struct ccs_data_container *ccsdata, const void *data, rval = __ccs_data_parse(&bin, ccsdata, data, len, dev, verbose); if (rval) - return rval; + goto out_cleanup; rval = bin_backing_alloc(&bin); if (rval) - return rval; + goto out_cleanup; rval = __ccs_data_parse(&bin, ccsdata, data, len, dev, false); if (rval) - goto out_free; + goto out_cleanup; if (verbose && ccsdata->version) print_ccs_data_version(dev, ccsdata->version); @@ -965,15 +966,16 @@ int ccs_data_parse(struct ccs_data_container *ccsdata, const void *data, rval = -EPROTO; dev_dbg(dev, "parsing mismatch; base %p; now %p; end %p\n", bin.base, bin.now, bin.end); - goto out_free; + goto out_cleanup; } ccsdata->backing = bin.base; return 0; -out_free: +out_cleanup: kvfree(bin.base); + memset(ccsdata, 0, sizeof(*ccsdata)); return rval; } diff --git a/drivers/media/i2c/dw9768.c b/drivers/media/i2c/dw9768.c index 83a3ee275bbe..15954568d170 100644 --- a/drivers/media/i2c/dw9768.c +++ b/drivers/media/i2c/dw9768.c @@ -476,10 +476,9 @@ static int dw9768_probe(struct i2c_client *client) * to be powered on in an ACPI system. Similarly for power off in * remove. 
*/ - pm_runtime_enable(dev); full_power = (is_acpi_node(dev_fwnode(dev)) && acpi_dev_state_d0(dev)) || - (is_of_node(dev_fwnode(dev)) && !pm_runtime_enabled(dev)); + (is_of_node(dev_fwnode(dev)) && !IS_ENABLED(CONFIG_PM)); if (full_power) { ret = dw9768_runtime_resume(dev); if (ret < 0) { @@ -489,6 +488,7 @@ static int dw9768_probe(struct i2c_client *client) pm_runtime_set_active(dev); } + pm_runtime_enable(dev); ret = v4l2_async_register_subdev(&dw9768->sd); if (ret < 0) { dev_err(dev, "failed to register V4L2 subdev: %d", ret); @@ -500,12 +500,12 @@ static int dw9768_probe(struct i2c_client *client) return 0; err_power_off: + pm_runtime_disable(dev); if (full_power) { dw9768_runtime_suspend(dev); pm_runtime_set_suspended(dev); } err_clean_entity: - pm_runtime_disable(dev); media_entity_cleanup(&dw9768->sd.entity); err_free_handler: v4l2_ctrl_handler_free(&dw9768->ctrls); @@ -522,12 +522,12 @@ static void dw9768_remove(struct i2c_client *client) v4l2_async_unregister_subdev(&dw9768->sd); v4l2_ctrl_handler_free(&dw9768->ctrls); media_entity_cleanup(&dw9768->sd.entity); + pm_runtime_disable(dev); if ((is_acpi_node(dev_fwnode(dev)) && acpi_dev_state_d0(dev)) || - (is_of_node(dev_fwnode(dev)) && !pm_runtime_enabled(dev))) { + (is_of_node(dev_fwnode(dev)) && !IS_ENABLED(CONFIG_PM))) { dw9768_runtime_suspend(dev); pm_runtime_set_suspended(dev); } - pm_runtime_disable(dev); } static const struct of_device_id dw9768_of_table[] = { diff --git a/drivers/media/i2c/et8ek8/et8ek8_driver.c b/drivers/media/i2c/et8ek8/et8ek8_driver.c index ff9bb9fc97dd..49fe2bd702f7 100644 --- a/drivers/media/i2c/et8ek8/et8ek8_driver.c +++ b/drivers/media/i2c/et8ek8/et8ek8_driver.c @@ -1460,7 +1460,7 @@ err_mutex: return ret; } -static void __exit et8ek8_remove(struct i2c_client *client) +static void et8ek8_remove(struct i2c_client *client) { struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct et8ek8_sensor *sensor = to_et8ek8_sensor(subdev); @@ -1502,7 +1502,7 @@ static struct i2c_driver et8ek8_i2c_driver = { .of_match_table = et8ek8_of_table, }, .probe_new = et8ek8_probe, - .remove = __exit_p(et8ek8_remove), + .remove = et8ek8_remove, .id_table = et8ek8_id_table, }; diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c index a00761b1e18c..9219f3c9594b 100644 --- a/drivers/media/i2c/imx274.c +++ b/drivers/media/i2c/imx274.c @@ -2060,9 +2060,8 @@ static int imx274_probe(struct i2c_client *client) imx274->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(imx274->reset_gpio)) { - if (PTR_ERR(imx274->reset_gpio) != -EPROBE_DEFER) - dev_err(dev, "Reset GPIO not setup in DT"); - ret = PTR_ERR(imx274->reset_gpio); + ret = dev_err_probe(dev, PTR_ERR(imx274->reset_gpio), + "Reset GPIO not setup in DT\n"); goto err_me; } diff --git a/drivers/media/i2c/imx412.c b/drivers/media/i2c/imx412.c index 77fa6253ba3e..ab543bb8a621 100644 --- a/drivers/media/i2c/imx412.c +++ b/drivers/media/i2c/imx412.c @@ -549,7 +549,7 @@ static int imx412_update_exp_gain(struct imx412 *imx412, u32 exposure, u32 gain) lpfr = imx412->vblank + imx412->cur_mode->height; - dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u", + dev_dbg(imx412->dev, "Set exp %u, analog gain %u, lpfr %u\n", exposure, gain, lpfr); ret = imx412_write_reg(imx412, IMX412_REG_HOLD, 1, 1); @@ -596,7 +596,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl) case V4L2_CID_VBLANK: imx412->vblank = imx412->vblank_ctrl->val; - dev_dbg(imx412->dev, "Received vblank %u, new lpfr %u", + dev_dbg(imx412->dev, "Received vblank %u, 
new lpfr %u\n", imx412->vblank, imx412->vblank + imx412->cur_mode->height); @@ -615,7 +615,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl) exposure = ctrl->val; analog_gain = imx412->again_ctrl->val; - dev_dbg(imx412->dev, "Received exp %u, analog gain %u", + dev_dbg(imx412->dev, "Received exp %u, analog gain %u\n", exposure, analog_gain); ret = imx412_update_exp_gain(imx412, exposure, analog_gain); @@ -624,7 +624,7 @@ static int imx412_set_ctrl(struct v4l2_ctrl *ctrl) break; default: - dev_err(imx412->dev, "Invalid control %d", ctrl->id); + dev_err(imx412->dev, "Invalid control %d\n", ctrl->id); ret = -EINVAL; } @@ -805,14 +805,14 @@ static int imx412_start_streaming(struct imx412 *imx412) ret = imx412_write_regs(imx412, reg_list->regs, reg_list->num_of_regs); if (ret) { - dev_err(imx412->dev, "fail to write initial registers"); + dev_err(imx412->dev, "fail to write initial registers\n"); return ret; } /* Setup handler will write actual exposure and gain */ ret = __v4l2_ctrl_handler_setup(imx412->sd.ctrl_handler); if (ret) { - dev_err(imx412->dev, "fail to setup handler"); + dev_err(imx412->dev, "fail to setup handler\n"); return ret; } @@ -823,7 +823,7 @@ static int imx412_start_streaming(struct imx412 *imx412) ret = imx412_write_reg(imx412, IMX412_REG_MODE_SELECT, 1, IMX412_MODE_STREAMING); if (ret) { - dev_err(imx412->dev, "fail to start streaming"); + dev_err(imx412->dev, "fail to start streaming\n"); return ret; } @@ -904,7 +904,7 @@ static int imx412_detect(struct imx412 *imx412) return ret; if (val != IMX412_ID) { - dev_err(imx412->dev, "chip id mismatch: %x!=%x", + dev_err(imx412->dev, "chip id mismatch: %x!=%x\n", IMX412_ID, val); return -ENXIO; } @@ -936,7 +936,7 @@ static int imx412_parse_hw_config(struct imx412 *imx412) imx412->reset_gpio = devm_gpiod_get_optional(imx412->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(imx412->reset_gpio)) { - dev_err(imx412->dev, "failed to get reset gpio %ld", + dev_err(imx412->dev, "failed to get reset gpio %ld\n", PTR_ERR(imx412->reset_gpio)); return PTR_ERR(imx412->reset_gpio); } @@ -944,13 +944,13 @@ static int imx412_parse_hw_config(struct imx412 *imx412) /* Get sensor input clock */ imx412->inclk = devm_clk_get(imx412->dev, NULL); if (IS_ERR(imx412->inclk)) { - dev_err(imx412->dev, "could not get inclk"); + dev_err(imx412->dev, "could not get inclk\n"); return PTR_ERR(imx412->inclk); } rate = clk_get_rate(imx412->inclk); if (rate != IMX412_INCLK_RATE) { - dev_err(imx412->dev, "inclk frequency mismatch"); + dev_err(imx412->dev, "inclk frequency mismatch\n"); return -EINVAL; } @@ -975,14 +975,14 @@ static int imx412_parse_hw_config(struct imx412 *imx412) if (bus_cfg.bus.mipi_csi2.num_data_lanes != IMX412_NUM_DATA_LANES) { dev_err(imx412->dev, - "number of CSI2 data lanes %d is not supported", + "number of CSI2 data lanes %d is not supported\n", bus_cfg.bus.mipi_csi2.num_data_lanes); ret = -EINVAL; goto done_endpoint_free; } if (!bus_cfg.nr_of_link_frequencies) { - dev_err(imx412->dev, "no link frequencies defined"); + dev_err(imx412->dev, "no link frequencies defined\n"); ret = -EINVAL; goto done_endpoint_free; } @@ -1040,7 +1040,7 @@ static int imx412_power_on(struct device *dev) ret = clk_prepare_enable(imx412->inclk); if (ret) { - dev_err(imx412->dev, "fail to enable inclk"); + dev_err(imx412->dev, "fail to enable inclk\n"); goto error_reset; } @@ -1151,7 +1151,7 @@ static int imx412_init_controls(struct imx412 *imx412) imx412->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY; if (ctrl_hdlr->error) { - dev_err(imx412->dev, "control 
init failed: %d", + dev_err(imx412->dev, "control init failed: %d\n", ctrl_hdlr->error); v4l2_ctrl_handler_free(ctrl_hdlr); return ctrl_hdlr->error; @@ -1184,7 +1184,7 @@ static int imx412_probe(struct i2c_client *client) ret = imx412_parse_hw_config(imx412); if (ret) { - dev_err(imx412->dev, "HW configuration is not supported"); + dev_err(imx412->dev, "HW configuration is not supported\n"); return ret; } @@ -1192,14 +1192,14 @@ static int imx412_probe(struct i2c_client *client) ret = imx412_power_on(imx412->dev); if (ret) { - dev_err(imx412->dev, "failed to power-on the sensor"); + dev_err(imx412->dev, "failed to power-on the sensor\n"); goto error_mutex_destroy; } /* Check module identity */ ret = imx412_detect(imx412); if (ret) { - dev_err(imx412->dev, "failed to find sensor: %d", ret); + dev_err(imx412->dev, "failed to find sensor: %d\n", ret); goto error_power_off; } @@ -1209,7 +1209,7 @@ static int imx412_probe(struct i2c_client *client) ret = imx412_init_controls(imx412); if (ret) { - dev_err(imx412->dev, "failed to init controls: %d", ret); + dev_err(imx412->dev, "failed to init controls: %d\n", ret); goto error_power_off; } @@ -1221,14 +1221,14 @@ static int imx412_probe(struct i2c_client *client) imx412->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&imx412->sd.entity, 1, &imx412->pad); if (ret) { - dev_err(imx412->dev, "failed to init entity pads: %d", ret); + dev_err(imx412->dev, "failed to init entity pads: %d\n", ret); goto error_handler_free; } ret = v4l2_async_register_subdev_sensor(&imx412->sd); if (ret < 0) { dev_err(imx412->dev, - "failed to register async subdev: %d", ret); + "failed to register async subdev: %d\n", ret); goto error_media_entity; } diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index e0019668a8f8..ea4e02ead57a 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -1971,6 +1971,7 @@ static int ov5640_get_light_freq(struct ov5640_dev *sensor) light_freq = 50; } else { /* 60Hz */ + light_freq = 60; } } diff --git a/drivers/media/i2c/ov9282.c b/drivers/media/i2c/ov9282.c index df144a2f6eda..0ba4edd24d64 100644 --- a/drivers/media/i2c/ov9282.c +++ b/drivers/media/i2c/ov9282.c @@ -31,7 +31,7 @@ /* Exposure control */ #define OV9282_REG_EXPOSURE 0x3500 #define OV9282_EXPOSURE_MIN 1 -#define OV9282_EXPOSURE_OFFSET 12 +#define OV9282_EXPOSURE_OFFSET 25 #define OV9282_EXPOSURE_STEP 1 #define OV9282_EXPOSURE_DEFAULT 0x0282 diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index 68628ccecd16..2c8189e04a13 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -1891,12 +1891,9 @@ static int tc358743_probe_of(struct tc358743_state *state) int ret; refclk = devm_clk_get(dev, "refclk"); - if (IS_ERR(refclk)) { - if (PTR_ERR(refclk) != -EPROBE_DEFER) - dev_err(dev, "failed to get refclk: %ld\n", - PTR_ERR(refclk)); - return PTR_ERR(refclk); - } + if (IS_ERR(refclk)) + return dev_err_probe(dev, PTR_ERR(refclk), + "failed to get refclk\n"); ep = of_graph_get_next_endpoint(dev->of_node, NULL); if (!ep) { @@ -2162,8 +2159,10 @@ static int tc358743_probe(struct i2c_client *client) err_work_queues: cec_unregister_adapter(state->cec_adap); - if (!state->i2c_client->irq) + if (!state->i2c_client->irq) { + del_timer(&state->timer); flush_work(&state->work_i2c_poll); + } cancel_delayed_work(&state->delayed_work_enable_hotplug); mutex_destroy(&state->confctl_mutex); err_hdl: diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c 
b/drivers/media/platform/allegro-dvt/allegro-core.c index 2423714afcb9..1a556e381eac 100644 --- a/drivers/media/platform/allegro-dvt/allegro-core.c +++ b/drivers/media/platform/allegro-dvt/allegro-core.c @@ -1510,8 +1510,10 @@ static int allocate_buffers_internal(struct allegro_channel *channel, INIT_LIST_HEAD(&buffer->head); err = allegro_alloc_buffer(dev, buffer, size); - if (err) + if (err) { + kfree(buffer); goto err; + } list_add(&buffer->head, list); } @@ -3913,6 +3915,7 @@ static int allegro_probe(struct platform_device *pdev) if (ret < 0) { v4l2_err(&dev->v4l2_dev, "failed to request firmware: %d\n", ret); + v4l2_device_unregister(&dev->v4l2_dev); return ret; } diff --git a/drivers/media/platform/amphion/vpu_drv.c b/drivers/media/platform/amphion/vpu_drv.c index f01ce49d27e8..9afcf0236cf0 100644 --- a/drivers/media/platform/amphion/vpu_drv.c +++ b/drivers/media/platform/amphion/vpu_drv.c @@ -151,8 +151,8 @@ err_add_decoder: media_device_cleanup(&vpu->mdev); v4l2_device_unregister(&vpu->v4l2_dev); err_vpu_deinit: - pm_runtime_set_suspended(dev); pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); return ret; } diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c index e7a18948c4ab..19189d7449dc 100644 --- a/drivers/media/platform/amphion/vpu_v4l2.c +++ b/drivers/media/platform/amphion/vpu_v4l2.c @@ -740,6 +740,7 @@ int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func) vfd->fops = vdec_get_fops(); vfd->ioctl_ops = vdec_get_ioctl_ops(); } + video_set_drvdata(vfd, vpu); ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1); if (ret) { @@ -747,7 +748,6 @@ int vpu_add_func(struct vpu_dev *vpu, struct vpu_func *func) v4l2_m2m_release(func->m2m_dev); return ret; } - video_set_drvdata(vfd, vpu); func->vfd = vfd; ret = v4l2_m2m_register_media_controller(func->m2m_dev, func->vfd, func->function); diff --git a/drivers/media/platform/aspeed/aspeed-video.c b/drivers/media/platform/aspeed/aspeed-video.c index 20f795ccc11b..c5af28bf0e96 100644 --- a/drivers/media/platform/aspeed/aspeed-video.c +++ b/drivers/media/platform/aspeed/aspeed-video.c @@ -1047,7 +1047,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video) static void aspeed_video_set_resolution(struct aspeed_video *video) { struct v4l2_bt_timings *act = &video->active_timings; - unsigned int size = act->width * act->height; + unsigned int size = act->width * ALIGN(act->height, 8); /* Set capture/compression frame sizes */ aspeed_video_calc_compressed_size(video, size); @@ -1064,7 +1064,7 @@ static void aspeed_video_set_resolution(struct aspeed_video *video) u32 width = ALIGN(act->width, 64); aspeed_video_write(video, VE_CAP_WINDOW, width << 16 | act->height); - size = width * act->height; + size = width * ALIGN(act->height, 8); } else { aspeed_video_write(video, VE_CAP_WINDOW, act->width << 16 | act->height); diff --git a/drivers/media/platform/marvell/mcam-core.c b/drivers/media/platform/marvell/mcam-core.c index ad4a7922d0d7..22d1d11cd195 100644 --- a/drivers/media/platform/marvell/mcam-core.c +++ b/drivers/media/platform/marvell/mcam-core.c @@ -935,7 +935,12 @@ static int mclk_enable(struct clk_hw *hw) ret = pm_runtime_resume_and_get(cam->dev); if (ret < 0) return ret; - clk_enable(cam->clk[0]); + ret = clk_enable(cam->clk[0]); + if (ret) { + pm_runtime_put(cam->dev); + return ret; + } + mcam_reg_write(cam, REG_CLKCTRL, (mclk_src << 29) | mclk_div); mcam_ctlr_power_up(cam); diff --git a/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c 
b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c index 1e3833f1c9ae..ad5fab2d8bfa 100644 --- a/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c +++ b/drivers/media/platform/mediatek/mdp/mtk_mdp_comp.c @@ -52,9 +52,8 @@ int mtk_mdp_comp_init(struct device *dev, struct device_node *node, for (i = 0; i < ARRAY_SIZE(comp->clk); i++) { comp->clk[i] = of_clk_get(node, i); if (IS_ERR(comp->clk[i])) { - if (PTR_ERR(comp->clk[i]) != -EPROBE_DEFER) - dev_err(dev, "Failed to get clock\n"); - ret = PTR_ERR(comp->clk[i]); + ret = dev_err_probe(dev, PTR_ERR(comp->clk[i]), + "Failed to get clock\n"); goto put_dev; } diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c index d8e66b645bd8..27f08b1d34d1 100644 --- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c +++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_fw_scp.c @@ -65,6 +65,8 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(struct mtk_vcodec_dev *dev) } fw = devm_kzalloc(&dev->plat_dev->dev, sizeof(*fw), GFP_KERNEL); + if (!fw) + return ERR_PTR(-ENOMEM); fw->type = SCP; fw->ops = &mtk_vcodec_rproc_msg; fw->scp = scp; diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c index 999ce7ee5fdc..6952875ca183 100644 --- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c +++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_h264_req_multi_if.c @@ -729,11 +729,16 @@ static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs return vpu_dec_reset(vpu); fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx); + if (!fb) { + mtk_vcodec_err(inst, "fb buffer is NULL"); + return -ENOMEM; + } + src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer); dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer); - y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0; - c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0; + y_fb_dma = fb->base_y.dma_addr; + c_fb_dma = fb->base_c.dma_addr; mtk_vcodec_debug(inst, "[h264-dec] [%d] y_dma=%llx c_dma=%llx", inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma); diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c index e1fe2603e92e..22d8f178b04d 100644 --- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c +++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp8_req_if.c @@ -336,14 +336,18 @@ static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs, src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer); fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx); - dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer); + if (!fb) { + mtk_vcodec_err(inst, "fb buffer is NULL"); + return -ENOMEM; + } - y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0; + dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer); + y_fb_dma = fb->base_y.dma_addr; if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1) c_fb_dma = y_fb_dma + inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h; else - c_fb_dma = fb ? 
(u64)fb->base_c.dma_addr : 0; + c_fb_dma = fb->base_c.dma_addr; inst->vsi->dec.bs_dma = (u64)bs->dma_addr; inst->vsi->dec.bs_sz = bs->size; diff --git a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c index cf16cf2807f0..f0e207044bf3 100644 --- a/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c +++ b/drivers/media/platform/mediatek/vcodec/vdec/vdec_vp9_req_lat_if.c @@ -1192,7 +1192,8 @@ err: return ret; } -static +/* clang stack usage explodes if this is inlined */ +static noinline_for_stack void vdec_vp9_slice_map_counts_eob_coef(unsigned int i, unsigned int j, unsigned int k, struct vdec_vp9_slice_frame_counts *counts, struct v4l2_vp9_frame_symbol_counts *counts_helper) diff --git a/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c b/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c index df309e8e9379..af3fc89b6cc5 100644 --- a/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c +++ b/drivers/media/platform/mediatek/vcodec/vdec_vpu_if.c @@ -213,6 +213,12 @@ int vpu_dec_init(struct vdec_vpu_inst *vpu) mtk_vcodec_debug(vpu, "vdec_inst=%p", vpu); err = vcodec_vpu_send_msg(vpu, (void *)&msg, sizeof(msg)); + + if (IS_ERR_OR_NULL(vpu->vsi)) { + mtk_vcodec_err(vpu, "invalid vdec vsi, status=%d", err); + return -EINVAL; + } + mtk_vcodec_debug(vpu, "- ret=%d", err); return err; } diff --git a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c index 81a44702a541..f2ecd3322d1e 100644 --- a/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c +++ b/drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c @@ -2097,9 +2097,12 @@ static void mxc_jpeg_detach_pm_domains(struct mxc_jpeg_dev *jpeg) int i; for (i = 0; i < jpeg->num_domains; i++) { - if (jpeg->pd_link[i] && !IS_ERR(jpeg->pd_link[i])) + if (!IS_ERR_OR_NULL(jpeg->pd_dev[i]) && + !pm_runtime_suspended(jpeg->pd_dev[i])) + pm_runtime_force_suspend(jpeg->pd_dev[i]); + if (!IS_ERR_OR_NULL(jpeg->pd_link[i])) device_link_del(jpeg->pd_link[i]); - if (jpeg->pd_dev[i] && !IS_ERR(jpeg->pd_dev[i])) + if (!IS_ERR_OR_NULL(jpeg->pd_dev[i])) dev_pm_domain_detach(jpeg->pd_dev[i], true); jpeg->pd_dev[i] = NULL; jpeg->pd_link[i] = NULL; @@ -2252,6 +2255,7 @@ static int mxc_jpeg_probe(struct platform_device *pdev) jpeg->dec_vdev->vfl_dir = VFL_DIR_M2M; jpeg->dec_vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE; + video_set_drvdata(jpeg->dec_vdev, jpeg); if (mode == MXC_JPEG_ENCODE) { v4l2_disable_ioctl(jpeg->dec_vdev, VIDIOC_DECODER_CMD); v4l2_disable_ioctl(jpeg->dec_vdev, VIDIOC_TRY_DECODER_CMD); @@ -2264,7 +2268,6 @@ static int mxc_jpeg_probe(struct platform_device *pdev) dev_err(dev, "failed to register video device\n"); goto err_vdev_register; } - video_set_drvdata(jpeg->dec_vdev, jpeg); if (mode == MXC_JPEG_ENCODE) v4l2_info(&jpeg->v4l2_dev, "encoder device registered as /dev/video%d (%d,%d)\n", diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c index 6360314f04a6..b90e2e690f3a 100644 --- a/drivers/media/platform/qcom/camss/camss-csid.c +++ b/drivers/media/platform/qcom/camss/camss-csid.c @@ -239,11 +239,13 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable) int ret; if (enable) { - ret = v4l2_ctrl_handler_setup(&csid->ctrls); - if (ret < 0) { - dev_err(csid->camss->dev, - "could not sync v4l2 controls: %d\n", ret); - return ret; + if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) { + ret = 
v4l2_ctrl_handler_setup(&csid->ctrls); + if (ret < 0) { + dev_err(csid->camss->dev, + "could not sync v4l2 controls: %d\n", ret); + return ret; + } } if (!csid->testgen.enabled && @@ -318,7 +320,8 @@ static void csid_try_format(struct csid_device *csid, break; case MSM_CSID_PAD_SRC: - if (csid->testgen_mode->cur.val == 0) { + if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED || + csid->testgen_mode->cur.val == 0) { /* Test generator is disabled, */ /* keep pad formats in sync */ u32 code = fmt->code; @@ -368,7 +371,8 @@ static int csid_enum_mbus_code(struct v4l2_subdev *sd, code->code = csid->formats[code->index].code; } else { - if (csid->testgen_mode->cur.val == 0) { + if (csid->testgen.nmodes == CSID_PAYLOAD_MODE_DISABLED || + csid->testgen_mode->cur.val == 0) { struct v4l2_mbus_framefmt *sink_fmt; sink_fmt = __csid_get_format(csid, sd_state, @@ -750,7 +754,8 @@ static int csid_link_setup(struct media_entity *entity, /* If test generator is enabled */ /* do not allow a link from CSIPHY to CSID */ - if (csid->testgen_mode->cur.val != 0) + if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED && + csid->testgen_mode->cur.val != 0) return -EBUSY; sd = media_entity_to_v4l2_subdev(remote->entity); @@ -843,25 +848,28 @@ int msm_csid_register_entity(struct csid_device *csid, MSM_CSID_NAME, csid->id); v4l2_set_subdevdata(sd, csid); - ret = v4l2_ctrl_handler_init(&csid->ctrls, 1); - if (ret < 0) { - dev_err(dev, "Failed to init ctrl handler: %d\n", ret); - return ret; + if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) { + ret = v4l2_ctrl_handler_init(&csid->ctrls, 1); + if (ret < 0) { + dev_err(dev, "Failed to init ctrl handler: %d\n", ret); + return ret; + } + + csid->testgen_mode = + v4l2_ctrl_new_std_menu_items(&csid->ctrls, + &csid_ctrl_ops, V4L2_CID_TEST_PATTERN, + csid->testgen.nmodes, 0, 0, + csid->testgen.modes); + + if (csid->ctrls.error) { + dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error); + ret = csid->ctrls.error; + goto free_ctrl; + } + + csid->subdev.ctrl_handler = &csid->ctrls; } - csid->testgen_mode = v4l2_ctrl_new_std_menu_items(&csid->ctrls, - &csid_ctrl_ops, V4L2_CID_TEST_PATTERN, - csid->testgen.nmodes, 0, 0, - csid->testgen.modes); - - if (csid->ctrls.error) { - dev_err(dev, "Failed to init ctrl: %d\n", csid->ctrls.error); - ret = csid->ctrls.error; - goto free_ctrl; - } - - csid->subdev.ctrl_handler = &csid->ctrls; - ret = csid_init_formats(sd, NULL); if (ret < 0) { dev_err(dev, "Failed to init format: %d\n", ret); @@ -891,7 +899,8 @@ int msm_csid_register_entity(struct csid_device *csid, media_cleanup: media_entity_cleanup(&sd->entity); free_ctrl: - v4l2_ctrl_handler_free(&csid->ctrls); + if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) + v4l2_ctrl_handler_free(&csid->ctrls); return ret; } @@ -904,5 +913,6 @@ void msm_csid_unregister_entity(struct csid_device *csid) { v4l2_device_unregister_subdev(&csid->subdev); media_entity_cleanup(&csid->subdev.entity); - v4l2_ctrl_handler_free(&csid->ctrls); + if (csid->testgen.nmodes != CSID_PAYLOAD_MODE_DISABLED) + v4l2_ctrl_handler_free(&csid->ctrls); } diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index 6f7ce4f53c90..46277a9be05a 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -405,8 +405,8 @@ err_of_depopulate: of_platform_depopulate(dev); err_runtime_disable: pm_runtime_put_noidle(dev); - pm_runtime_set_suspended(dev); pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); 
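Several probe error paths in the hunks above (amphion vpu_drv.c, ccs-core.c, venus core.c) are reordered the same way: pm_runtime_disable() now runs before pm_runtime_set_suspended(). A minimal sketch of that unwind order follows; the helper name is hypothetical and the surrounding teardown calls vary per driver.

#include <linux/pm_runtime.h>

static void probe_unwind_pm_sketch(struct device *dev)
{
	pm_runtime_put_noidle(dev);
	/* disable runtime PM first, then record the suspended state */
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
}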
hfi_destroy(core); err_core_deinit: hfi_core_deinit(core, false); diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h index 32551c2602a9..83a97288319b 100644 --- a/drivers/media/platform/qcom/venus/core.h +++ b/drivers/media/platform/qcom/venus/core.h @@ -317,6 +317,14 @@ enum venus_dec_state { VENUS_DEC_STATE_DRC = 7, }; +enum venus_enc_state { + VENUS_ENC_STATE_DEINIT = 0, + VENUS_ENC_STATE_INIT = 1, + VENUS_ENC_STATE_ENCODING = 2, + VENUS_ENC_STATE_STOPPED = 3, + VENUS_ENC_STATE_DRAIN = 4, +}; + struct venus_ts_metadata { bool used; u64 ts_ns; @@ -382,6 +390,7 @@ enum venus_inst_modes { * @sequence_out: a sequence counter for output queue * @m2m_dev: a reference to m2m device structure * @m2m_ctx: a reference to m2m context structure + * @ctx_q_lock: a lock to serialize video device ioctl calls * @state: current state of the instance * @done: a completion for sync HFI operation * @error: an error returned during last HFI sync operation @@ -428,6 +437,7 @@ struct venus_inst { u8 quantization; u8 xfer_func; enum venus_dec_state codec_state; + enum venus_enc_state enc_state; wait_queue_head_t reconf_wait; unsigned int subscriptions; int buf_count; @@ -452,6 +462,7 @@ struct venus_inst { u32 sequence_out; struct v4l2_m2m_dev *m2m_dev; struct v4l2_m2m_ctx *m2m_ctx; + struct mutex ctx_q_lock; unsigned int state; struct completion done; unsigned int error; diff --git a/drivers/media/platform/qcom/venus/hfi_parser.c b/drivers/media/platform/qcom/venus/hfi_parser.c index c43839539d4d..8eb9e32031f7 100644 --- a/drivers/media/platform/qcom/venus/hfi_parser.c +++ b/drivers/media/platform/qcom/venus/hfi_parser.c @@ -19,6 +19,8 @@ static void init_codecs(struct venus_core *core) struct hfi_plat_caps *caps = core->caps, *cap; unsigned long bit; + core->codecs_count = 0; + if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM) return; @@ -62,7 +64,7 @@ fill_buf_mode(struct hfi_plat_caps *cap, const void *data, unsigned int num) cap->cap_bufs_mode_dynamic = true; } -static void +static int parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data) { struct hfi_buffer_alloc_mode_supported *mode = data; @@ -70,7 +72,7 @@ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data) u32 *type; if (num_entries > MAX_ALLOC_MODE_ENTRIES) - return; + return -EINVAL; type = mode->data; @@ -82,6 +84,8 @@ parse_alloc_mode(struct venus_core *core, u32 codecs, u32 domain, void *data) type++; } + + return sizeof(*mode); } static void fill_profile_level(struct hfi_plat_caps *cap, const void *data, @@ -96,7 +100,7 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data, cap->num_pl += num; } -static void +static int parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data) { struct hfi_profile_level_supported *pl = data; @@ -104,12 +108,14 @@ parse_profile_level(struct venus_core *core, u32 codecs, u32 domain, void *data) struct hfi_profile_level pl_arr[HFI_MAX_PROFILE_COUNT] = {}; if (pl->profile_count > HFI_MAX_PROFILE_COUNT) - return; + return -EINVAL; memcpy(pl_arr, proflevel, pl->profile_count * sizeof(*proflevel)); for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain, fill_profile_level, pl_arr, pl->profile_count); + + return pl->profile_count * sizeof(*proflevel) + sizeof(u32); } static void @@ -124,7 +130,7 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num) cap->num_caps += num; } -static void +static int 
parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data) { struct hfi_capabilities *caps = data; @@ -133,12 +139,14 @@ parse_caps(struct venus_core *core, u32 codecs, u32 domain, void *data) struct hfi_capability caps_arr[MAX_CAP_ENTRIES] = {}; if (num_caps > MAX_CAP_ENTRIES) - return; + return -EINVAL; memcpy(caps_arr, cap, num_caps * sizeof(*cap)); for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain, fill_caps, caps_arr, num_caps); + + return sizeof(*caps); } static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts, @@ -153,7 +161,7 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts, cap->num_fmts += num_fmts; } -static void +static int parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data) { struct hfi_uncompressed_format_supported *fmt = data; @@ -162,7 +170,8 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data) struct raw_formats rawfmts[MAX_FMT_ENTRIES] = {}; u32 entries = fmt->format_entries; unsigned int i = 0; - u32 num_planes; + u32 num_planes = 0; + u32 size; while (entries) { num_planes = pinfo->num_planes; @@ -172,7 +181,7 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data) i++; if (i >= MAX_FMT_ENTRIES) - return; + return -EINVAL; if (pinfo->num_planes > MAX_PLANES) break; @@ -184,9 +193,13 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data) for_each_codec(core->caps, ARRAY_SIZE(core->caps), codecs, domain, fill_raw_fmts, rawfmts, i); + size = fmt->format_entries * (sizeof(*constr) * num_planes + 2 * sizeof(u32)) + + 2 * sizeof(u32); + + return size; } -static void parse_codecs(struct venus_core *core, void *data) +static int parse_codecs(struct venus_core *core, void *data) { struct hfi_codec_supported *codecs = data; @@ -198,21 +211,27 @@ static void parse_codecs(struct venus_core *core, void *data) core->dec_codecs &= ~HFI_VIDEO_CODEC_SPARK; core->enc_codecs &= ~HFI_VIDEO_CODEC_HEVC; } + + return sizeof(*codecs); } -static void parse_max_sessions(struct venus_core *core, const void *data) +static int parse_max_sessions(struct venus_core *core, const void *data) { const struct hfi_max_sessions_supported *sessions = data; core->max_sessions_supported = sessions->max_sessions; + + return sizeof(*sessions); } -static void parse_codecs_mask(u32 *codecs, u32 *domain, void *data) +static int parse_codecs_mask(u32 *codecs, u32 *domain, void *data) { struct hfi_codec_mask_supported *mask = data; *codecs = mask->codecs; *domain = mask->video_domains; + + return sizeof(*mask); } static void parser_init(struct venus_inst *inst, u32 *codecs, u32 *domain) @@ -281,8 +300,9 @@ static int hfi_platform_parser(struct venus_core *core, struct venus_inst *inst) u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf, u32 size) { - unsigned int words_count = size >> 2; - u32 *word = buf, *data, codecs = 0, domain = 0; + u32 *words = buf, *payload, codecs = 0, domain = 0; + u32 *frame_size = buf + size; + u32 rem_bytes = size; int ret; ret = hfi_platform_parser(core, inst); @@ -299,38 +319,66 @@ u32 hfi_parser(struct venus_core *core, struct venus_inst *inst, void *buf, memset(core->caps, 0, sizeof(core->caps)); } - while (words_count) { - data = word + 1; + while (words < frame_size) { + payload = words + 1; - switch (*word) { + switch (*words) { case HFI_PROPERTY_PARAM_CODEC_SUPPORTED: - parse_codecs(core, data); + if (rem_bytes <= sizeof(struct hfi_codec_supported)) + return 
HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_codecs(core, payload); + if (ret < 0) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + init_codecs(core); break; case HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED: - parse_max_sessions(core, data); + if (rem_bytes <= sizeof(struct hfi_max_sessions_supported)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_max_sessions(core, payload); break; case HFI_PROPERTY_PARAM_CODEC_MASK_SUPPORTED: - parse_codecs_mask(&codecs, &domain, data); + if (rem_bytes <= sizeof(struct hfi_codec_mask_supported)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_codecs_mask(&codecs, &domain, payload); break; case HFI_PROPERTY_PARAM_UNCOMPRESSED_FORMAT_SUPPORTED: - parse_raw_formats(core, codecs, domain, data); + if (rem_bytes <= sizeof(struct hfi_uncompressed_format_supported)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_raw_formats(core, codecs, domain, payload); break; case HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED: - parse_caps(core, codecs, domain, data); + if (rem_bytes <= sizeof(struct hfi_capabilities)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_caps(core, codecs, domain, payload); break; case HFI_PROPERTY_PARAM_PROFILE_LEVEL_SUPPORTED: - parse_profile_level(core, codecs, domain, data); + if (rem_bytes <= sizeof(struct hfi_profile_level_supported)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_profile_level(core, codecs, domain, payload); break; case HFI_PROPERTY_PARAM_BUFFER_ALLOC_MODE_SUPPORTED: - parse_alloc_mode(core, codecs, domain, data); + if (rem_bytes <= sizeof(struct hfi_buffer_alloc_mode_supported)) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + ret = parse_alloc_mode(core, codecs, domain, payload); break; default: + ret = sizeof(u32); break; } - word++; - words_count--; + if (ret < 0) + return HFI_ERR_SYS_INSUFFICIENT_RESOURCES; + + words += ret / sizeof(u32); + rem_bytes -= ret; } if (!core->max_sessions_supported) diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c index d46938aab26b..92ea4642d898 100644 --- a/drivers/media/platform/qcom/venus/hfi_venus.c +++ b/drivers/media/platform/qcom/venus/hfi_venus.c @@ -187,6 +187,9 @@ static int venus_write_queue(struct venus_hfi_device *hdev, /* ensure rd/wr indices's are read from memory */ rmb(); + if (qsize > IFACEQ_QUEUE_SIZE / 4) + return -EINVAL; + if (wr_idx >= rd_idx) empty_space = qsize - (wr_idx - rd_idx); else @@ -255,6 +258,9 @@ static int venus_read_queue(struct venus_hfi_device *hdev, wr_idx = qhdr->write_idx; qsize = qhdr->q_size; + if (qsize > IFACEQ_QUEUE_SIZE / 4) + return -EINVAL; + /* make sure data is valid before using it */ rmb(); @@ -1053,18 +1059,26 @@ static void venus_sfr_print(struct venus_hfi_device *hdev) { struct device *dev = hdev->core->dev; struct hfi_sfr *sfr = hdev->sfr.kva; + u32 size; void *p; if (!sfr) return; - p = memchr(sfr->data, '\0', sfr->buf_size); + size = sfr->buf_size; + if (!size) + return; + + if (size > ALIGNED_SFR_SIZE) + size = ALIGNED_SFR_SIZE; + + p = memchr(sfr->data, '\0', size); /* * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates * that Venus is in the process of crashing. 
*/ if (!p) - sfr->data[sfr->buf_size - 1] = '\0'; + sfr->data[size - 1] = '\0'; dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data); } diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index 7ea976efc024..3b51d603605e 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -1604,6 +1604,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq, src_vq->allow_zero_bytesused = 1; src_vq->min_buffers_needed = 0; src_vq->dev = inst->core->dev; + src_vq->lock = &inst->ctx_q_lock; ret = vb2_queue_init(src_vq); if (ret) return ret; @@ -1618,6 +1619,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq, dst_vq->allow_zero_bytesused = 1; dst_vq->min_buffers_needed = 0; dst_vq->dev = inst->core->dev; + dst_vq->lock = &inst->ctx_q_lock; return vb2_queue_init(dst_vq); } @@ -1636,6 +1638,7 @@ static int vdec_open(struct file *file) INIT_LIST_HEAD(&inst->internalbufs); INIT_LIST_HEAD(&inst->list); mutex_init(&inst->lock); + mutex_init(&inst->ctx_q_lock); inst->core = core; inst->session_type = VIDC_SESSION_TYPE_DEC; @@ -1712,6 +1715,7 @@ static int vdec_close(struct file *file) ida_destroy(&inst->dpb_ids); hfi_session_destroy(inst); mutex_destroy(&inst->lock); + mutex_destroy(&inst->ctx_q_lock); v4l2_fh_del(&inst->fh); v4l2_fh_exit(&inst->fh); diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index cdb12546c4fa..abd25720b96b 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -520,6 +520,51 @@ static int venc_subscribe_event(struct v4l2_fh *fh, } } +static int +venc_encoder_cmd(struct file *file, void *fh, struct v4l2_encoder_cmd *cmd) +{ + struct venus_inst *inst = to_inst(file); + struct hfi_frame_data fdata = {0}; + int ret = 0; + + ret = v4l2_m2m_ioctl_try_encoder_cmd(file, fh, cmd); + if (ret) + return ret; + + mutex_lock(&inst->lock); + + if (cmd->cmd == V4L2_ENC_CMD_STOP && + inst->enc_state == VENUS_ENC_STATE_ENCODING) { + /* + * Implement V4L2_ENC_CMD_STOP by enqueue an empty buffer on + * encoder input to signal EOS. 
+ */ + if (!(inst->streamon_out && inst->streamon_cap)) + goto unlock; + + fdata.buffer_type = HFI_BUFFER_INPUT; + fdata.flags |= HFI_BUFFERFLAG_EOS; + fdata.device_addr = 0xdeadb000; + + ret = hfi_session_process_buf(inst, &fdata); + + inst->enc_state = VENUS_ENC_STATE_DRAIN; + } else if (cmd->cmd == V4L2_ENC_CMD_START) { + if (inst->enc_state == VENUS_ENC_STATE_DRAIN) { + ret = -EBUSY; + goto unlock; + } + if (inst->enc_state == VENUS_ENC_STATE_STOPPED) { + vb2_clear_last_buffer_dequeued(&inst->fh.m2m_ctx->cap_q_ctx.q); + inst->enc_state = VENUS_ENC_STATE_ENCODING; + } + } + +unlock: + mutex_unlock(&inst->lock); + return ret; +} + static const struct v4l2_ioctl_ops venc_ioctl_ops = { .vidioc_querycap = venc_querycap, .vidioc_enum_fmt_vid_cap = venc_enum_fmt, @@ -548,6 +593,7 @@ static const struct v4l2_ioctl_ops venc_ioctl_ops = { .vidioc_subscribe_event = venc_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd, + .vidioc_encoder_cmd = venc_encoder_cmd, }; static int venc_pm_get(struct venus_inst *inst) @@ -1196,6 +1242,8 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count) if (ret) goto error; + inst->enc_state = VENUS_ENC_STATE_ENCODING; + mutex_unlock(&inst->lock); return 0; @@ -1215,10 +1263,21 @@ error: static void venc_vb2_buf_queue(struct vb2_buffer *vb) { struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue); + struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); venc_pm_get_put(inst); mutex_lock(&inst->lock); + + if (inst->enc_state == VENUS_ENC_STATE_STOPPED) { + vbuf->sequence = inst->sequence_cap++; + vbuf->field = V4L2_FIELD_NONE; + vb2_set_plane_payload(vb, 0, 0); + v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_DONE); + mutex_unlock(&inst->lock); + return; + } + venus_helper_vb2_buf_queue(vb); mutex_unlock(&inst->lock); } @@ -1260,6 +1319,10 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type, vb->planes[0].data_offset = data_offset; vb->timestamp = timestamp_us * NSEC_PER_USEC; vbuf->sequence = inst->sequence_cap++; + if ((vbuf->flags & V4L2_BUF_FLAG_LAST) && + inst->enc_state == VENUS_ENC_STATE_DRAIN) { + inst->enc_state = VENUS_ENC_STATE_STOPPED; + } } else { vbuf->sequence = inst->sequence_out++; } @@ -1307,6 +1370,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq, src_vq->allow_zero_bytesused = 1; src_vq->min_buffers_needed = 1; src_vq->dev = inst->core->dev; + src_vq->lock = &inst->ctx_q_lock; if (inst->core->res->hfi_version == HFI_VERSION_1XX) src_vq->bidirectional = 1; ret = vb2_queue_init(src_vq); @@ -1323,6 +1387,7 @@ static int m2m_queue_init(void *priv, struct vb2_queue *src_vq, dst_vq->allow_zero_bytesused = 1; dst_vq->min_buffers_needed = 1; dst_vq->dev = inst->core->dev; + dst_vq->lock = &inst->ctx_q_lock; return vb2_queue_init(dst_vq); } @@ -1355,6 +1420,7 @@ static int venc_open(struct file *file) INIT_LIST_HEAD(&inst->internalbufs); INIT_LIST_HEAD(&inst->list); mutex_init(&inst->lock); + mutex_init(&inst->ctx_q_lock); inst->core = core; inst->session_type = VIDC_SESSION_TYPE_ENC; @@ -1362,6 +1428,9 @@ static int venc_open(struct file *file) inst->core_acquired = false; inst->nonblock = file->f_flags & O_NONBLOCK; + if (inst->enc_state == VENUS_ENC_STATE_DEINIT) + inst->enc_state = VENUS_ENC_STATE_INIT; + venus_helper_init_instance(inst); ret = venc_ctrl_init(inst); @@ -1421,9 +1490,12 @@ static int venc_close(struct file *file) venc_ctrl_deinit(inst); hfi_session_destroy(inst); mutex_destroy(&inst->lock); + 
mutex_destroy(&inst->ctx_q_lock); v4l2_fh_del(&inst->fh); v4l2_fh_exit(&inst->fh); + inst->enc_state = VENUS_ENC_STATE_DEINIT; + venc_pm_put(inst, false); kfree(inst); diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.c b/drivers/media/platform/samsung/exynos4-is/media-dev.c index 2f3071acb9c9..98a60f01129d 100644 --- a/drivers/media/platform/samsung/exynos4-is/media-dev.c +++ b/drivers/media/platform/samsung/exynos4-is/media-dev.c @@ -1471,9 +1471,7 @@ static int fimc_md_probe(struct platform_device *pdev) pinctrl = devm_pinctrl_get(dev); if (IS_ERR(pinctrl)) { - ret = PTR_ERR(pinctrl); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to get pinctrl: %d\n", ret); + ret = dev_err_probe(dev, PTR_ERR(pinctrl), "Failed to get pinctrl\n"); goto err_clk; } diff --git a/drivers/media/platform/samsung/exynos4-is/media-dev.h b/drivers/media/platform/samsung/exynos4-is/media-dev.h index 62ad5d7e035a..f901ddb386ef 100644 --- a/drivers/media/platform/samsung/exynos4-is/media-dev.h +++ b/drivers/media/platform/samsung/exynos4-is/media-dev.h @@ -179,8 +179,9 @@ int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on); #ifdef CONFIG_OF static inline bool fimc_md_is_isp_available(struct device_node *node) { - node = of_get_child_by_name(node, FIMC_IS_OF_NODE_NAME); - return node ? of_device_is_available(node) : false; + struct device_node *child __free(device_node) = + of_get_child_by_name(node, FIMC_IS_OF_NODE_NAME); + return child ? of_device_is_available(child) : false; } #else #define fimc_md_is_isp_available(node) (false) diff --git a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c index 6a0d35f33e8c..f308ef618986 100644 --- a/drivers/media/platform/samsung/exynos4-is/mipi-csis.c +++ b/drivers/media/platform/samsung/exynos4-is/mipi-csis.c @@ -940,13 +940,19 @@ static int s5pcsis_pm_resume(struct device *dev, bool runtime) state->supplies); goto unlock; } - clk_enable(state->clock[CSIS_CLK_GATE]); + ret = clk_enable(state->clock[CSIS_CLK_GATE]); + if (ret) { + phy_power_off(state->phy); + regulator_bulk_disable(CSIS_NUM_SUPPLIES, + state->supplies); + goto unlock; + } } if (state->flags & ST_STREAMING) s5pcsis_start_stream(state); state->flags &= ~ST_SUSPENDED; - unlock: +unlock: mutex_unlock(&state->lock); return ret ? 
-EAGAIN : 0; } diff --git a/drivers/media/platform/samsung/s3c-camif/camif-core.c b/drivers/media/platform/samsung/s3c-camif/camif-core.c index 6e8ef86566b7..38c586b01b17 100644 --- a/drivers/media/platform/samsung/s3c-camif/camif-core.c +++ b/drivers/media/platform/samsung/s3c-camif/camif-core.c @@ -528,10 +528,19 @@ static int s3c_camif_remove(struct platform_device *pdev) static int s3c_camif_runtime_resume(struct device *dev) { struct camif_dev *camif = dev_get_drvdata(dev); + int ret; + + ret = clk_enable(camif->clock[CLK_GATE]); + if (ret) + return ret; - clk_enable(camif->clock[CLK_GATE]); /* null op on s3c244x */ - clk_enable(camif->clock[CLK_CAM]); + ret = clk_enable(camif->clock[CLK_CAM]); + if (ret) { + clk_disable(camif->clock[CLK_GATE]); + return ret; + } + return 0; } diff --git a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c index 1dbb89f0ddb8..b2a977f1ec18 100644 --- a/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c @@ -802,13 +802,12 @@ static int c8sectpfe_probe(struct platform_device *pdev) } tsin->i2c_adapter = of_find_i2c_adapter_by_node(i2c_bus); + of_node_put(i2c_bus); if (!tsin->i2c_adapter) { dev_err(&pdev->dev, "No i2c adapter found\n"); - of_node_put(i2c_bus); ret = -ENODEV; goto err_node_put; } - of_node_put(i2c_bus); tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0); diff --git a/drivers/media/platform/st/stm32/dma2d/dma2d.c b/drivers/media/platform/st/stm32/dma2d/dma2d.c index 9706aa41b5d2..1a8c4b76df0d 100644 --- a/drivers/media/platform/st/stm32/dma2d/dma2d.c +++ b/drivers/media/platform/st/stm32/dma2d/dma2d.c @@ -492,7 +492,8 @@ static void device_run(void *prv) dst->sequence = frm_cap->sequence++; v4l2_m2m_buf_copy_metadata(src, dst, true); - clk_enable(dev->gate); + if (clk_enable(dev->gate)) + goto end; dma2d_config_fg(dev, frm_out, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0)); diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c index 37458d4d9564..06be28b361f1 100644 --- a/drivers/media/platform/st/stm32/stm32-dcmi.c +++ b/drivers/media/platform/st/stm32/stm32-dcmi.c @@ -1946,12 +1946,9 @@ static int dcmi_probe(struct platform_device *pdev) return -ENOMEM; dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); - if (IS_ERR(dcmi->rstc)) { - if (PTR_ERR(dcmi->rstc) != -EPROBE_DEFER) - dev_err(&pdev->dev, "Could not get reset control\n"); - - return PTR_ERR(dcmi->rstc); - } + if (IS_ERR(dcmi->rstc)) + return dev_err_probe(&pdev->dev, PTR_ERR(dcmi->rstc), + "Could not get reset control\n"); /* Get bus characteristics from devicetree */ np = of_graph_get_next_endpoint(np, NULL); @@ -2003,20 +2000,14 @@ static int dcmi_probe(struct platform_device *pdev) } mclk = devm_clk_get(&pdev->dev, "mclk"); - if (IS_ERR(mclk)) { - if (PTR_ERR(mclk) != -EPROBE_DEFER) - dev_err(&pdev->dev, "Unable to get mclk\n"); - return PTR_ERR(mclk); - } + if (IS_ERR(mclk)) + return dev_err_probe(&pdev->dev, PTR_ERR(mclk), + "Unable to get mclk\n"); chan = dma_request_chan(&pdev->dev, "tx"); - if (IS_ERR(chan)) { - ret = PTR_ERR(chan); - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "Failed to request DMA channel: %d\n", ret); - return ret; - } + if (IS_ERR(chan)) + return dev_err_probe(&pdev->dev, PTR_ERR(chan), + "Failed to request DMA channel\n"); dcmi->dma_max_burst = UINT_MAX; ret = dma_get_slave_caps(chan, &caps); diff --git 
a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c index 11ae479ee89c..e7327e38482d 100644 --- a/drivers/media/platform/ti/omap3isp/isp.c +++ b/drivers/media/platform/ti/omap3isp/isp.c @@ -1884,8 +1884,7 @@ static int isp_initialize_modules(struct isp_device *isp) ret = omap3isp_ccp2_init(isp); if (ret < 0) { - if (ret != -EPROBE_DEFER) - dev_err(isp->dev, "CCP2 initialization failed\n"); + dev_err_probe(isp->dev, ret, "CCP2 initialization failed\n"); goto error_ccp2; } diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c index a9d4ac84a8d8..d1971af5f7fa 100644 --- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c +++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c @@ -517,6 +517,7 @@ static void set_buffers(struct hantro_ctx *ctx) hantro_reg_write(vpu, &g2_stream_len, src_len); hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len); hantro_reg_write(vpu, &g2_strm_start_offset, 0); + hantro_reg_write(vpu, &g2_start_bit, 0); hantro_reg_write(vpu, &g2_write_mvs_e, 1); hantro_write_addr(vpu, G2_TILE_SIZES_ADDR, ctx->hevc_dec.tile_sizes.dma); diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c index 29b53febc2e7..d8a23f18cfbc 100644 --- a/drivers/media/platform/xilinx/xilinx-csi2rxss.c +++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c @@ -976,11 +976,9 @@ static int xcsi2rxss_probe(struct platform_device *pdev) /* Reset GPIO */ xcsi2rxss->rst_gpio = devm_gpiod_get_optional(dev, "video-reset", GPIOD_OUT_HIGH); - if (IS_ERR(xcsi2rxss->rst_gpio)) { - if (PTR_ERR(xcsi2rxss->rst_gpio) != -EPROBE_DEFER) - dev_err(dev, "Video Reset GPIO not setup in DT"); - return PTR_ERR(xcsi2rxss->rst_gpio); - } + if (IS_ERR(xcsi2rxss->rst_gpio)) + return dev_err_probe(dev, PTR_ERR(xcsi2rxss->rst_gpio), + "Video Reset GPIO not setup in DT\n"); ret = xcsi2rxss_parse_of(xcsi2rxss); if (ret < 0) diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c index 8a316de70e6c..df26dbf914e8 100644 --- a/drivers/media/radio/wl128x/fmdrv_common.c +++ b/drivers/media/radio/wl128x/fmdrv_common.c @@ -466,11 +466,12 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload, jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000); return -ETIMEDOUT; } + spin_lock_irqsave(&fmdev->resp_skb_lock, flags); if (!fmdev->resp_skb) { + spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags); fmerr("Response SKB is missing\n"); return -EFAULT; } - spin_lock_irqsave(&fmdev->resp_skb_lock, flags); skb = fmdev->resp_skb; fmdev->resp_skb = NULL; spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags); diff --git a/drivers/media/rc/gpio-ir-recv.c b/drivers/media/rc/gpio-ir-recv.c index 16795e07dc10..41ef8cdba28c 100644 --- a/drivers/media/rc/gpio-ir-recv.c +++ b/drivers/media/rc/gpio-ir-recv.c @@ -74,13 +74,9 @@ static int gpio_ir_recv_probe(struct platform_device *pdev) return -ENOMEM; gpio_dev->gpiod = devm_gpiod_get(dev, NULL, GPIOD_IN); - if (IS_ERR(gpio_dev->gpiod)) { - rc = PTR_ERR(gpio_dev->gpiod); - /* Just try again if this happens */ - if (rc != -EPROBE_DEFER) - dev_err(dev, "error getting gpio (%d)\n", rc); - return rc; - } + if (IS_ERR(gpio_dev->gpiod)) + return dev_err_probe(dev, PTR_ERR(gpio_dev->gpiod), + "error getting gpio\n"); gpio_dev->irq = gpiod_to_irq(gpio_dev->gpiod); if (gpio_dev->irq < 0) return gpio_dev->irq; diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c index 
d3063ddb472e..2b829c146db1 100644 --- a/drivers/media/rc/gpio-ir-tx.c +++ b/drivers/media/rc/gpio-ir-tx.c @@ -174,12 +174,9 @@ static int gpio_ir_tx_probe(struct platform_device *pdev) return -ENOMEM; gpio_ir->gpio = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_LOW); - if (IS_ERR(gpio_ir->gpio)) { - if (PTR_ERR(gpio_ir->gpio) != -EPROBE_DEFER) - dev_err(&pdev->dev, "Failed to get gpio (%ld)\n", - PTR_ERR(gpio_ir->gpio)); - return PTR_ERR(gpio_ir->gpio); - } + if (IS_ERR(gpio_ir->gpio)) + return dev_err_probe(&pdev->dev, PTR_ERR(gpio_ir->gpio), + "Failed to get gpio\n"); rcdev->priv = gpio_ir; rcdev->driver_name = DRIVER_NAME; diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c index 276bf3c8a8cb..8af94246e591 100644 --- a/drivers/media/rc/iguanair.c +++ b/drivers/media/rc/iguanair.c @@ -194,8 +194,10 @@ static int iguanair_send(struct iguanair *ir, unsigned size) if (rc) return rc; - if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0) + if (wait_for_completion_timeout(&ir->completion, TIMEOUT) == 0) { + usb_kill_urb(ir->urb_out); return -ETIMEDOUT; + } return rc; } diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c index a3b145183260..85080c3d2053 100644 --- a/drivers/media/rc/ir-rx51.c +++ b/drivers/media/rc/ir-rx51.c @@ -231,13 +231,8 @@ static int ir_rx51_probe(struct platform_device *dev) struct rc_dev *rcdev; pwm = pwm_get(&dev->dev, NULL); - if (IS_ERR(pwm)) { - int err = PTR_ERR(pwm); - - if (err != -EPROBE_DEFER) - dev_err(&dev->dev, "pwm_get failed: %d\n", err); - return err; - } + if (IS_ERR(pwm)) + return dev_err_probe(&dev->dev, PTR_ERR(pwm), "pwm_get failed\n"); /* Use default, in case userspace does not set the carrier */ ir_rx51.freq = DIV_ROUND_CLOSEST_ULL(pwm_get_period(pwm), NSEC_PER_SEC); diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c index 9b209e687f25..d3b48a0dd1f4 100644 --- a/drivers/media/rc/streamzap.c +++ b/drivers/media/rc/streamzap.c @@ -138,39 +138,10 @@ static void sz_push_half_space(struct streamzap_ir *sz, sz_push_full_space(sz, value & SZ_SPACE_MASK); } -/* - * streamzap_callback - usb IRQ handler callback - * - * This procedure is invoked on reception of data from - * the usb remote. - */ -static void streamzap_callback(struct urb *urb) +static void sz_process_ir_data(struct streamzap_ir *sz, int len) { - struct streamzap_ir *sz; unsigned int i; - int len; - if (!urb) - return; - - sz = urb->context; - len = urb->actual_length; - - switch (urb->status) { - case -ECONNRESET: - case -ENOENT: - case -ESHUTDOWN: - /* - * this urb is terminated, clean up. - * sz might already be invalid at this point - */ - dev_err(sz->dev, "urb terminated, status: %d\n", urb->status); - return; - default: - break; - } - - dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len); for (i = 0; i < len; i++) { dev_dbg(sz->dev, "sz->buf_in[%d]: %x\n", i, (unsigned char)sz->buf_in[i]); @@ -219,6 +190,43 @@ static void streamzap_callback(struct urb *urb) } ir_raw_event_handle(sz->rdev); +} + +/* + * streamzap_callback - usb IRQ handler callback + * + * This procedure is invoked on reception of data from + * the usb remote. 
+ */ +static void streamzap_callback(struct urb *urb) +{ + struct streamzap_ir *sz; + int len; + + if (!urb) + return; + + sz = urb->context; + len = urb->actual_length; + + switch (urb->status) { + case 0: + dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len); + sz_process_ir_data(sz, len); + break; + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* + * this urb is terminated, clean up. + * sz might already be invalid at this point + */ + dev_err(sz->dev, "urb terminated, status: %d\n", urb->status); + return; + default: + break; + } + usb_submit_urb(urb, GFP_ATOMIC); } @@ -385,8 +393,8 @@ static void streamzap_disconnect(struct usb_interface *interface) if (!sz) return; - rc_unregister_device(sz->rdev); usb_kill_urb(sz->urb_in); + rc_unregister_device(sz->rdev); usb_free_urb(sz->urb_in); usb_free_coherent(usbdev, sz->buf_in_len, sz->buf_in, sz->dma_in); diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c index dff7265a42ca..c1621680ec57 100644 --- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c +++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c @@ -191,10 +191,11 @@ static int vidtv_start_streaming(struct vidtv_dvb *dvb) mux_args.mux_buf_sz = mux_buf_sz; - dvb->streaming = true; dvb->mux = vidtv_mux_init(dvb->fe[0], dev, &mux_args); if (!dvb->mux) return -ENOMEM; + + dvb->streaming = true; vidtv_mux_start_thread(dvb->mux); dev_dbg_ratelimited(dev, "Started streaming\n"); @@ -205,6 +206,11 @@ static int vidtv_stop_streaming(struct vidtv_dvb *dvb) { struct device *dev = &dvb->pdev->dev; + if (!dvb->streaming) { + dev_warn_ratelimited(dev, "No streaming. Skipping.\n"); + return 0; + } + dvb->streaming = false; vidtv_mux_stop_thread(dvb->mux); vidtv_mux_destroy(dvb->mux); diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c index 7964426bf2f7..3367cdf9f104 100644 --- a/drivers/media/test-drivers/vim2m.c +++ b/drivers/media/test-drivers/vim2m.c @@ -1316,9 +1316,6 @@ static int vim2m_probe(struct platform_device *pdev) vfd->v4l2_dev = &dev->v4l2_dev; video_set_drvdata(vfd, dev); - v4l2_info(&dev->v4l2_dev, - "Device registered as /dev/video%d\n", vfd->num); - platform_set_drvdata(pdev, dev); dev->m2m_dev = v4l2_m2m_init(&m2m_ops); @@ -1345,6 +1342,9 @@ static int vim2m_probe(struct platform_device *pdev) goto error_m2m; } + v4l2_info(&dev->v4l2_dev, + "Device registered as /dev/video%d\n", vfd->num); + #ifdef CONFIG_MEDIA_CONTROLLER ret = v4l2_m2m_register_media_controller(dev->m2m_dev, vfd, MEDIA_ENT_F_PROC_VIDEO_SCALER); diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c index 690daada7db4..54e6a6772035 100644 --- a/drivers/media/test-drivers/vivid/vivid-kthread-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-kthread-cap.c @@ -894,9 +894,14 @@ static int vivid_thread_vid_cap(void *data) next_jiffies_since_start = jiffies_since_start; wait_jiffies = next_jiffies_since_start - jiffies_since_start; - while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && - !kthread_should_stop()) - schedule(); + if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) + continue; + + wait_queue_head_t wait; + + init_waitqueue_head(&wait); + wait_event_interruptible_timeout(wait, kthread_should_stop(), + cur_jiffies + wait_jiffies - jiffies); } dprintk(dev, 1, "Video Capture Thread End\n"); return 0; diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-out.c b/drivers/media/test-drivers/vivid/vivid-kthread-out.c 
index 0833e021bb11..8a17a01e6e42 100644 --- a/drivers/media/test-drivers/vivid/vivid-kthread-out.c +++ b/drivers/media/test-drivers/vivid/vivid-kthread-out.c @@ -235,9 +235,14 @@ static int vivid_thread_vid_out(void *data) next_jiffies_since_start = jiffies_since_start; wait_jiffies = next_jiffies_since_start - jiffies_since_start; - while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && - !kthread_should_stop()) - schedule(); + if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) + continue; + + wait_queue_head_t wait; + + init_waitqueue_head(&wait); + wait_event_interruptible_timeout(wait, kthread_should_stop(), + cur_jiffies + wait_jiffies - jiffies); } dprintk(dev, 1, "Video Output Thread End\n"); return 0; diff --git a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c index fa711ee36a3f..c862689786b6 100644 --- a/drivers/media/test-drivers/vivid/vivid-kthread-touch.c +++ b/drivers/media/test-drivers/vivid/vivid-kthread-touch.c @@ -135,9 +135,14 @@ static int vivid_thread_touch_cap(void *data) next_jiffies_since_start = jiffies_since_start; wait_jiffies = next_jiffies_since_start - jiffies_since_start; - while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && - !kthread_should_stop()) - schedule(); + if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) + continue; + + wait_queue_head_t wait; + + init_waitqueue_head(&wait); + wait_event_interruptible_timeout(wait, kthread_should_stop(), + cur_jiffies + wait_jiffies - jiffies); } dprintk(dev, 1, "Touch Capture Thread End\n"); return 0; diff --git a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c index 0ae5628b86c9..abccd1d0109e 100644 --- a/drivers/media/test-drivers/vivid/vivid-sdr-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-sdr-cap.c @@ -206,9 +206,14 @@ static int vivid_thread_sdr_cap(void *data) next_jiffies_since_start = jiffies_since_start; wait_jiffies = next_jiffies_since_start - jiffies_since_start; - while (time_is_after_jiffies(cur_jiffies + wait_jiffies) && - !kthread_should_stop()) - schedule(); + if (!time_is_after_jiffies(cur_jiffies + wait_jiffies)) + continue; + + wait_queue_head_t wait; + + init_waitqueue_head(&wait); + wait_event_interruptible_timeout(wait, kthread_should_stop(), + cur_jiffies + wait_jiffies - jiffies); } dprintk(dev, 1, "SDR Capture Thread End\n"); return 0; diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c index 3864df45077d..b26b567f0c7f 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c @@ -1719,12 +1719,19 @@ static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings) h_freq = (u32)bt->pixelclock / total_h_pixel; if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) { + struct v4l2_dv_timings cvt = {}; + if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width, - bt->polarities, bt->interlaced, timings)) + bt->polarities, bt->interlaced, + &vivid_dv_timings_cap, &cvt) && + cvt.bt.width == bt->width && cvt.bt.height == bt->height) { + *timings = cvt; return true; + } } if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) { + struct v4l2_dv_timings gtf = {}; struct v4l2_fract aspect_ratio; find_aspect_ratio(bt->width, bt->height, @@ -1732,8 +1739,12 @@ static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings) &aspect_ratio.denominator); if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync, 
bt->polarities, bt->interlaced, - aspect_ratio, timings)) + aspect_ratio, &vivid_dv_timings_cap, + &gtf) && + gtf.bt.width == bt->width && gtf.bt.height == bt->height) { + *timings = gtf; return true; + } } return false; } diff --git a/drivers/media/usb/cx231xx/cx231xx-417.c b/drivers/media/usb/cx231xx/cx231xx-417.c index c5e21785fafe..02343e88cc61 100644 --- a/drivers/media/usb/cx231xx/cx231xx-417.c +++ b/drivers/media/usb/cx231xx/cx231xx-417.c @@ -1722,6 +1722,8 @@ static void cx231xx_video_dev_init( vfd->lock = &dev->lock; vfd->release = video_device_release_empty; vfd->ctrl_handler = &dev->mpeg_ctrl_handler.hdl; + vfd->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | + V4L2_CAP_VIDEO_CAPTURE; video_set_drvdata(vfd, dev); if (dev->tuner_type == TUNER_ABSENT) { v4l2_disable_ioctl(vfd, VIDIOC_G_FREQUENCY); diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c index 92efe6c1f47b..bda729b42d05 100644 --- a/drivers/media/usb/cx231xx/cx231xx-cards.c +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c @@ -994,6 +994,8 @@ const unsigned int cx231xx_bcount = ARRAY_SIZE(cx231xx_boards); /* table of devices that work with this driver */ struct usb_device_id cx231xx_id_table[] = { + {USB_DEVICE(0x1D19, 0x6108), + .driver_info = CX231XX_BOARD_PV_XCAPTURE_USB}, {USB_DEVICE(0x1D19, 0x6109), .driver_info = CX231XX_BOARD_PV_XCAPTURE_USB}, {USB_DEVICE(0x0572, 0x5A3C), diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c index 8a34e6c0d6a6..f0537b741d13 100644 --- a/drivers/media/usb/dvb-usb-v2/lmedm04.c +++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c @@ -373,6 +373,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap) struct dvb_usb_device *d = adap_to_d(adap); struct lme2510_state *lme_int = adap_to_priv(adap); struct usb_host_endpoint *ep; + int ret; lme_int->lme_urb = usb_alloc_urb(0, GFP_KERNEL); @@ -390,11 +391,20 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap) /* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */ ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe); + if (!ep) { + usb_free_urb(lme_int->lme_urb); + return -ENODEV; + } if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK) lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa); - usb_submit_urb(lme_int->lme_urb, GFP_KERNEL); + ret = usb_submit_urb(lme_int->lme_urb, GFP_KERNEL); + if (ret) { + usb_free_urb(lme_int->lme_urb); + return ret; + } + info("INT Interrupt Service Started"); return 0; diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c index 8b6a57f170d0..bdff64a29a33 100644 --- a/drivers/media/usb/gspca/ov534.c +++ b/drivers/media/usb/gspca/ov534.c @@ -847,7 +847,7 @@ static void set_frame_rate(struct gspca_dev *gspca_dev) r = rate_1; i = ARRAY_SIZE(rate_1); } - while (--i > 0) { + while (--i >= 0) { if (sd->frame_rate >= r->fps) break; r++; diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c index dffc9d03235c..69f9f451ab40 100644 --- a/drivers/media/usb/uvc/uvc_ctrl.c +++ b/drivers/media/usb/uvc/uvc_ctrl.c @@ -1470,6 +1470,40 @@ static void uvc_ctrl_send_slave_event(struct uvc_video_chain *chain, uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes); } +static void uvc_ctrl_set_handle(struct uvc_fh *handle, struct uvc_control *ctrl, + struct uvc_fh *new_handle) +{ + lockdep_assert_held(&handle->chain->ctrl_mutex); + + if (new_handle) { + if (ctrl->handle) + dev_warn_ratelimited(&handle->stream->dev->udev->dev, + "UVC non compliance: 
Setting an async control with a pending operation."); + + if (new_handle == ctrl->handle) + return; + + if (ctrl->handle) { + WARN_ON(!ctrl->handle->pending_async_ctrls); + if (ctrl->handle->pending_async_ctrls) + ctrl->handle->pending_async_ctrls--; + } + + ctrl->handle = new_handle; + handle->pending_async_ctrls++; + return; + } + + /* Cannot clear the handle for a control not owned by us.*/ + if (WARN_ON(ctrl->handle != handle)) + return; + + ctrl->handle = NULL; + if (WARN_ON(!handle->pending_async_ctrls)) + return; + handle->pending_async_ctrls--; +} + void uvc_ctrl_status_event(struct uvc_video_chain *chain, struct uvc_control *ctrl, const u8 *data) { @@ -1480,7 +1514,8 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain, mutex_lock(&chain->ctrl_mutex); handle = ctrl->handle; - ctrl->handle = NULL; + if (handle) + uvc_ctrl_set_handle(handle, ctrl, NULL); list_for_each_entry(mapping, &ctrl->info.mappings, list) { s32 value = __uvc_ctrl_get_value(mapping, data); @@ -1531,10 +1566,8 @@ bool uvc_ctrl_status_event_async(struct urb *urb, struct uvc_video_chain *chain, struct uvc_device *dev = chain->dev; struct uvc_ctrl_work *w = &dev->async_ctrl; - if (list_empty(&ctrl->info.mappings)) { - ctrl->handle = NULL; + if (list_empty(&ctrl->info.mappings)) return false; - } w->data = data; w->urb = urb; @@ -1564,13 +1597,13 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle, { struct uvc_control_mapping *mapping; struct uvc_control *ctrl; - u32 changes = V4L2_EVENT_CTRL_CH_VALUE; unsigned int i; unsigned int j; for (i = 0; i < xctrls_count; ++i) { - ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping); + u32 changes = V4L2_EVENT_CTRL_CH_VALUE; + ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping); if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS) /* Notification will be sent from an Interrupt event. */ continue; @@ -1702,7 +1735,10 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain) } static int uvc_ctrl_commit_entity(struct uvc_device *dev, - struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl) + struct uvc_fh *handle, + struct uvc_entity *entity, + int rollback, + struct uvc_control **err_ctrl) { struct uvc_control *ctrl; unsigned int i; @@ -1750,6 +1786,10 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev, *err_ctrl = ctrl; return ret; } + + if (!rollback && handle && + ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS) + uvc_ctrl_set_handle(handle, ctrl, handle); } return 0; @@ -1786,18 +1826,20 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback, /* Find the control. 
*/ list_for_each_entry(entity, &chain->entities, chain) { - ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback, - &err_ctrl); - if (ret < 0) + ret = uvc_ctrl_commit_entity(chain->dev, handle, entity, + rollback, &err_ctrl); + if (ret < 0) { + if (ctrls) + ctrls->error_idx = + uvc_ctrl_find_ctrl_idx(entity, ctrls, + err_ctrl); goto done; + } } if (!rollback) uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count); done: - if (ret < 0 && ctrls) - ctrls->error_idx = uvc_ctrl_find_ctrl_idx(entity, ctrls, - err_ctrl); mutex_unlock(&chain->ctrl_mutex); return ret; } @@ -1927,9 +1969,6 @@ int uvc_ctrl_set(struct uvc_fh *handle, mapping->set(mapping, value, uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT)); - if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS) - ctrl->handle = handle; - ctrl->dirty = 1; ctrl->modified = 1; return 0; @@ -2102,7 +2141,7 @@ static int uvc_ctrl_init_xu_ctrl(struct uvc_device *dev, int uvc_xu_ctrl_query(struct uvc_video_chain *chain, struct uvc_xu_control_query *xqry) { - struct uvc_entity *entity; + struct uvc_entity *entity, *iter; struct uvc_control *ctrl; unsigned int i; bool found; @@ -2112,16 +2151,16 @@ int uvc_xu_ctrl_query(struct uvc_video_chain *chain, int ret; /* Find the extension unit. */ - found = false; - list_for_each_entry(entity, &chain->entities, chain) { - if (UVC_ENTITY_TYPE(entity) == UVC_VC_EXTENSION_UNIT && - entity->id == xqry->unit) { - found = true; + entity = NULL; + list_for_each_entry(iter, &chain->entities, chain) { + if (UVC_ENTITY_TYPE(iter) == UVC_VC_EXTENSION_UNIT && + iter->id == xqry->unit) { + entity = iter; break; } } - if (!found) { + if (!entity) { uvc_dbg(chain->dev, CONTROL, "Extension unit %u not found\n", xqry->unit); return -ENOENT; @@ -2258,7 +2297,7 @@ int uvc_ctrl_restore_values(struct uvc_device *dev) ctrl->dirty = 1; } - ret = uvc_ctrl_commit_entity(dev, entity, 0, NULL); + ret = uvc_ctrl_commit_entity(dev, NULL, entity, 0, NULL); if (ret < 0) return ret; } @@ -2662,6 +2701,26 @@ int uvc_ctrl_init_device(struct uvc_device *dev) return 0; } +void uvc_ctrl_cleanup_fh(struct uvc_fh *handle) +{ + struct uvc_entity *entity; + + guard(mutex)(&handle->chain->ctrl_mutex); + + if (!handle->pending_async_ctrls) + return; + + list_for_each_entry(entity, &handle->chain->dev->entities, list) { + for (unsigned int i = 0; i < entity->ncontrols; ++i) { + if (entity->controls[i].handle != handle) + continue; + uvc_ctrl_set_handle(handle, &entity->controls[i], NULL); + } + } + + WARN_ON(handle->pending_async_ctrls); +} + /* * Cleanup device controls. 
*/ diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c index 94f04eab4c4b..94bb7091de53 100644 --- a/drivers/media/usb/uvc/uvc_driver.c +++ b/drivers/media/usb/uvc/uvc_driver.c @@ -1248,18 +1248,15 @@ static int uvc_gpio_parse(struct uvc_device *dev) struct gpio_desc *gpio_privacy; int irq; - gpio_privacy = devm_gpiod_get_optional(&dev->udev->dev, "privacy", + gpio_privacy = devm_gpiod_get_optional(&dev->intf->dev, "privacy", GPIOD_IN); if (IS_ERR_OR_NULL(gpio_privacy)) return PTR_ERR_OR_ZERO(gpio_privacy); irq = gpiod_to_irq(gpio_privacy); - if (irq < 0) { - if (irq != EPROBE_DEFER) - dev_err(&dev->udev->dev, - "No IRQ for privacy GPIO (%d)\n", irq); - return irq; - } + if (irq < 0) + return dev_err_probe(&dev->intf->dev, irq, + "No IRQ for privacy GPIO\n"); unit = uvc_alloc_entity(UVC_EXT_GPIO_UNIT, UVC_EXT_GPIO_UNIT_ID, 0, 1); if (!unit) @@ -1284,15 +1281,27 @@ static int uvc_gpio_parse(struct uvc_device *dev) static int uvc_gpio_init_irq(struct uvc_device *dev) { struct uvc_entity *unit = dev->gpio_unit; + int ret; if (!unit || unit->gpio.irq < 0) return 0; - return devm_request_threaded_irq(&dev->udev->dev, unit->gpio.irq, NULL, - uvc_gpio_irq, - IRQF_ONESHOT | IRQF_TRIGGER_FALLING | - IRQF_TRIGGER_RISING, - "uvc_privacy_gpio", dev); + ret = request_threaded_irq(unit->gpio.irq, NULL, uvc_gpio_irq, + IRQF_ONESHOT | IRQF_TRIGGER_FALLING | + IRQF_TRIGGER_RISING, + "uvc_privacy_gpio", dev); + + unit->gpio.initialized = !ret; + + return ret; +} + +static void uvc_gpio_deinit(struct uvc_device *dev) +{ + if (!dev->gpio_unit || !dev->gpio_unit->gpio.initialized) + return; + + free_irq(dev->gpio_unit->gpio.irq, dev); } /* ------------------------------------------------------------------------ @@ -1886,12 +1895,44 @@ static void uvc_unregister_video(struct uvc_device *dev) { struct uvc_streaming *stream; + uvc_gpio_deinit(dev); + list_for_each_entry(stream, &dev->streams, list) { + /* Nothing to do here, continue. */ if (!video_is_registered(&stream->vdev)) continue; + /* + * For stream->vdev we follow the same logic as: + * vb2_video_unregister_device(). + */ + + /* 1. Take a reference to vdev */ + get_device(&stream->vdev.dev); + + /* 2. Ensure that no new ioctls can be called. */ video_unregister_device(&stream->vdev); - video_unregister_device(&stream->meta.vdev); + + /* 3. Wait for old ioctls to finish. */ + mutex_lock(&stream->mutex); + + /* 4. Stop streaming. */ + uvc_queue_release(&stream->queue); + + mutex_unlock(&stream->mutex); + + put_device(&stream->vdev.dev); + + /* + * For stream->meta.vdev we can directly call: + * vb2_video_unregister_device(). + */ + vb2_video_unregister_device(&stream->meta.vdev); + + /* + * Now both vdevs are not streaming and all the ioctls will + * return -ENODEV. + */ uvc_debugfs_cleanup_stream(stream); } @@ -2400,6 +2441,8 @@ static const struct uvc_device_info uvc_quirk_force_y8 = { * The Logitech cameras listed below have their interface class set to * VENDOR_SPEC because they don't announce themselves as UVC devices, even * though they are compliant. + * + * Sort these by vendor/product ID. 
*/ static const struct usb_device_id uvc_ids[] = { /* Quanta USB2.0 HD UVC Webcam */ @@ -2942,6 +2985,15 @@ static const struct usb_device_id uvc_ids[] = { .bInterfaceProtocol = 0, .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_IGNORE_SELECTOR_UNIT) }, + /* NXP Semiconductors IR VIDEO */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x1fc9, + .idProduct = 0x009b, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax }, /* Oculus VR Positional Tracker DK2 */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, diff --git a/drivers/media/usb/uvc/uvc_queue.c b/drivers/media/usb/uvc/uvc_queue.c index 16fa17bbd15e..83ed7821fa2a 100644 --- a/drivers/media/usb/uvc/uvc_queue.c +++ b/drivers/media/usb/uvc/uvc_queue.c @@ -483,7 +483,8 @@ static void uvc_queue_buffer_complete(struct kref *ref) buf->state = buf->error ? UVC_BUF_STATE_ERROR : UVC_BUF_STATE_DONE; vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused); - vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE); + vb2_buffer_done(&buf->buf.vb2_buf, buf->error ? VB2_BUF_STATE_ERROR : + VB2_BUF_STATE_DONE); } /* diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c index 4a92c989cf33..d594acf9d8ce 100644 --- a/drivers/media/usb/uvc/uvc_status.c +++ b/drivers/media/usb/uvc/uvc_status.c @@ -267,6 +267,7 @@ int uvc_status_init(struct uvc_device *dev) dev->int_urb = usb_alloc_urb(0, GFP_KERNEL); if (dev->int_urb == NULL) { kfree(dev->status); + dev->status = NULL; return -ENOMEM; } diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index 950b42d78a10..0aaa4fce61da 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c @@ -36,6 +36,12 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain, unsigned int size; int ret; + if (xmap->data_type > UVC_CTRL_DATA_TYPE_BITMASK) { + uvc_dbg(chain->dev, CONTROL, + "Unsupported UVC data type %u\n", xmap->data_type); + return -EINVAL; + } + map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) return -ENOMEM; @@ -607,6 +613,8 @@ static int uvc_v4l2_release(struct file *file) uvc_dbg(stream->dev, CALLS, "%s\n", __func__); + uvc_ctrl_cleanup_fh(handle); + /* Only free resources if this is a privileged handle. 
*/ if (uvc_has_privileges(handle)) uvc_queue_release(&stream->queue); diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h index 33e7475d4e64..45caa8523426 100644 --- a/drivers/media/usb/uvc/uvcvideo.h +++ b/drivers/media/usb/uvc/uvcvideo.h @@ -227,6 +227,7 @@ struct uvc_entity { u8 *bmControls; struct gpio_desc *gpio_privacy; int irq; + bool initialized; } gpio; }; @@ -330,7 +331,11 @@ struct uvc_video_chain { struct uvc_entity *processing; /* Processing unit */ struct uvc_entity *selector; /* Selector unit */ - struct mutex ctrl_mutex; /* Protects ctrl.info */ + struct mutex ctrl_mutex; /* + * Protects ctrl.info, + * ctrl.handle and + * uvc_fh.pending_async_ctrls + */ struct v4l2_prio_state prio; /* V4L2 priority state */ u32 caps; /* V4L2 chain-wide caps */ @@ -584,6 +589,7 @@ struct uvc_fh { struct uvc_video_chain *chain; struct uvc_streaming *stream; enum uvc_handle_state state; + unsigned int pending_async_ctrls; }; struct uvc_driver { @@ -768,6 +774,8 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id, int uvc_xu_ctrl_query(struct uvc_video_chain *chain, struct uvc_xu_control_query *xqry); +void uvc_ctrl_cleanup_fh(struct uvc_fh *handle); + /* Utility functions */ struct usb_host_endpoint *uvc_find_endpoint(struct usb_host_interface *alts, u8 epaddr); diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index 942d0005c55e..4d05873892c1 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -481,25 +481,28 @@ EXPORT_SYMBOL_GPL(v4l2_calc_timeperframe); * @polarities - the horizontal and vertical polarities (same as struct * v4l2_bt_timings polarities). * @interlaced - if this flag is true, it indicates interlaced format - * @fmt - the resulting timings. + * @cap - the v4l2_dv_timings_cap capabilities. + * @timings - the resulting timings. * * This function will attempt to detect if the given values correspond to a * valid CVT format. If so, then it will return true, and fmt will be filled * in with the found CVT timings. 
*/ -bool v4l2_detect_cvt(unsigned frame_height, - unsigned hfreq, - unsigned vsync, - unsigned active_width, +bool v4l2_detect_cvt(unsigned int frame_height, + unsigned int hfreq, + unsigned int vsync, + unsigned int active_width, u32 polarities, bool interlaced, - struct v4l2_dv_timings *fmt) + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *timings) { - int v_fp, v_bp, h_fp, h_bp, hsync; - int frame_width, image_height, image_width; + struct v4l2_dv_timings t = {}; + int v_fp, v_bp, h_fp, h_bp, hsync; + int frame_width, image_height, image_width; bool reduced_blanking; bool rb_v2 = false; - unsigned pix_clk; + unsigned int pix_clk; if (vsync < 4 || vsync > 8) return false; @@ -625,36 +628,39 @@ bool v4l2_detect_cvt(unsigned frame_height, h_fp = h_blank - hsync - h_bp; } - fmt->type = V4L2_DV_BT_656_1120; - fmt->bt.polarities = polarities; - fmt->bt.width = image_width; - fmt->bt.height = image_height; - fmt->bt.hfrontporch = h_fp; - fmt->bt.vfrontporch = v_fp; - fmt->bt.hsync = hsync; - fmt->bt.vsync = vsync; - fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync; + t.type = V4L2_DV_BT_656_1120; + t.bt.polarities = polarities; + t.bt.width = image_width; + t.bt.height = image_height; + t.bt.hfrontporch = h_fp; + t.bt.vfrontporch = v_fp; + t.bt.hsync = hsync; + t.bt.vsync = vsync; + t.bt.hbackporch = frame_width - image_width - h_fp - hsync; if (!interlaced) { - fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync; - fmt->bt.interlaced = V4L2_DV_PROGRESSIVE; + t.bt.vbackporch = frame_height - image_height - v_fp - vsync; + t.bt.interlaced = V4L2_DV_PROGRESSIVE; } else { - fmt->bt.vbackporch = (frame_height - image_height - 2 * v_fp - + t.bt.vbackporch = (frame_height - image_height - 2 * v_fp - 2 * vsync) / 2; - fmt->bt.il_vbackporch = frame_height - image_height - 2 * v_fp - - 2 * vsync - fmt->bt.vbackporch; - fmt->bt.il_vfrontporch = v_fp; - fmt->bt.il_vsync = vsync; - fmt->bt.flags |= V4L2_DV_FL_HALF_LINE; - fmt->bt.interlaced = V4L2_DV_INTERLACED; + t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp - + 2 * vsync - t.bt.vbackporch; + t.bt.il_vfrontporch = v_fp; + t.bt.il_vsync = vsync; + t.bt.flags |= V4L2_DV_FL_HALF_LINE; + t.bt.interlaced = V4L2_DV_INTERLACED; } - fmt->bt.pixelclock = pix_clk; - fmt->bt.standards = V4L2_DV_BT_STD_CVT; + t.bt.pixelclock = pix_clk; + t.bt.standards = V4L2_DV_BT_STD_CVT; if (reduced_blanking) - fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING; + t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING; + if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL)) + return false; + *timings = t; return true; } EXPORT_SYMBOL_GPL(v4l2_detect_cvt); @@ -699,22 +705,25 @@ EXPORT_SYMBOL_GPL(v4l2_detect_cvt); * image height, so it has to be passed explicitly. Usually * the native screen aspect ratio is used for this. If it * is not filled in correctly, then 16:9 will be assumed. - * @fmt - the resulting timings. + * @cap - the v4l2_dv_timings_cap capabilities. + * @timings - the resulting timings. * * This function will attempt to detect if the given values correspond to a * valid GTF format. If so, then it will return true, and fmt will be filled * in with the found GTF timings. 
*/ -bool v4l2_detect_gtf(unsigned frame_height, - unsigned hfreq, - unsigned vsync, - u32 polarities, - bool interlaced, - struct v4l2_fract aspect, - struct v4l2_dv_timings *fmt) +bool v4l2_detect_gtf(unsigned int frame_height, + unsigned int hfreq, + unsigned int vsync, + u32 polarities, + bool interlaced, + struct v4l2_fract aspect, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *timings) { + struct v4l2_dv_timings t = {}; int pix_clk; - int v_fp, v_bp, h_fp, hsync; + int v_fp, v_bp, h_fp, hsync; int frame_width, image_height, image_width; bool default_gtf; int h_blank; @@ -755,7 +764,7 @@ bool v4l2_detect_gtf(unsigned frame_height, u64 num; u32 den; - num = ((image_width * GTF_D_C_PRIME * (u64)hfreq) - + num = (((u64)image_width * GTF_D_C_PRIME * hfreq) - ((u64)image_width * GTF_D_M_PRIME * 1000)); den = (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) * (2 * GTF_CELL_GRAN); @@ -765,7 +774,7 @@ bool v4l2_detect_gtf(unsigned frame_height, u64 num; u32 den; - num = ((image_width * GTF_S_C_PRIME * (u64)hfreq) - + num = (((u64)image_width * GTF_S_C_PRIME * hfreq) - ((u64)image_width * GTF_S_M_PRIME * 1000)); den = (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) * (2 * GTF_CELL_GRAN); @@ -783,36 +792,39 @@ bool v4l2_detect_gtf(unsigned frame_height, h_fp = h_blank / 2 - hsync; - fmt->type = V4L2_DV_BT_656_1120; - fmt->bt.polarities = polarities; - fmt->bt.width = image_width; - fmt->bt.height = image_height; - fmt->bt.hfrontporch = h_fp; - fmt->bt.vfrontporch = v_fp; - fmt->bt.hsync = hsync; - fmt->bt.vsync = vsync; - fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync; + t.type = V4L2_DV_BT_656_1120; + t.bt.polarities = polarities; + t.bt.width = image_width; + t.bt.height = image_height; + t.bt.hfrontporch = h_fp; + t.bt.vfrontporch = v_fp; + t.bt.hsync = hsync; + t.bt.vsync = vsync; + t.bt.hbackporch = frame_width - image_width - h_fp - hsync; if (!interlaced) { - fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync; - fmt->bt.interlaced = V4L2_DV_PROGRESSIVE; + t.bt.vbackporch = frame_height - image_height - v_fp - vsync; + t.bt.interlaced = V4L2_DV_PROGRESSIVE; } else { - fmt->bt.vbackporch = (frame_height - image_height - 2 * v_fp - + t.bt.vbackporch = (frame_height - image_height - 2 * v_fp - 2 * vsync) / 2; - fmt->bt.il_vbackporch = frame_height - image_height - 2 * v_fp - - 2 * vsync - fmt->bt.vbackporch; - fmt->bt.il_vfrontporch = v_fp; - fmt->bt.il_vsync = vsync; - fmt->bt.flags |= V4L2_DV_FL_HALF_LINE; - fmt->bt.interlaced = V4L2_DV_INTERLACED; + t.bt.il_vbackporch = frame_height - image_height - 2 * v_fp - + 2 * vsync - t.bt.vbackporch; + t.bt.il_vfrontporch = v_fp; + t.bt.il_vsync = vsync; + t.bt.flags |= V4L2_DV_FL_HALF_LINE; + t.bt.interlaced = V4L2_DV_INTERLACED; } - fmt->bt.pixelclock = pix_clk; - fmt->bt.standards = V4L2_DV_BT_STD_GTF; + t.bt.pixelclock = pix_clk; + t.bt.standards = V4L2_DV_BT_STD_GTF; if (!default_gtf) - fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING; + t.bt.flags |= V4L2_DV_FL_REDUCED_BLANKING; + if (!v4l2_valid_dv_timings(&t, cap, NULL, NULL)) + return false; + *timings = t; return true; } EXPORT_SYMBOL_GPL(v4l2_detect_gtf); diff --git a/drivers/media/v4l2-core/v4l2-mc.c b/drivers/media/v4l2-core/v4l2-mc.c index b01474717dca..541c99c24923 100644 --- a/drivers/media/v4l2-core/v4l2-mc.c +++ b/drivers/media/v4l2-core/v4l2-mc.c @@ -321,7 +321,7 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd, sink_sd = media_entity_to_v4l2_subdev(sink->entity); - 
fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) { + fwnode_graph_for_each_endpoint(src_sd->fwnode, endpoint) { struct fwnode_handle *remote_ep; int src_idx, sink_idx, ret; struct media_pad *src; diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 2351f2708da2..f30b1c0093d3 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -2154,26 +2154,6 @@ static int gpmc_probe_generic_child(struct platform_device *pdev, goto err; } - if (of_node_name_eq(child, "nand")) { - /* Warn about older DT blobs with no compatible property */ - if (!of_property_read_bool(child, "compatible")) { - dev_warn(&pdev->dev, - "Incompatible NAND node: missing compatible"); - ret = -EINVAL; - goto err; - } - } - - if (of_node_name_eq(child, "onenand")) { - /* Warn about older DT blobs with no compatible property */ - if (!of_property_read_bool(child, "compatible")) { - dev_warn(&pdev->dev, - "Incompatible OneNAND node: missing compatible"); - ret = -EINVAL; - goto err; - } - } - if (of_match_node(omap_nand_ids, child)) { /* NAND specific setup */ val = 8; diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c index 7343dbb79c9f..6a2b3c256599 100644 --- a/drivers/memory/renesas-rpc-if.c +++ b/drivers/memory/renesas-rpc-if.c @@ -299,16 +299,17 @@ static void rpcif_rzg2l_timing_adjust_sdr(struct rpcif_priv *rpc) regmap_write(rpc->regmap, RPCIF_PHYADD, 0x80000032); } -int rpcif_hw_init(struct rpcif *rpcif, bool hyperflash) +int rpcif_hw_init(struct device *dev, bool hyperflash) { - struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev); + struct rpcif_priv *rpc = dev_get_drvdata(dev); u32 dummy; + int ret; - pm_runtime_get_sync(rpc->dev); + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; if (rpc->type == RPCIF_RZ_G2L) { - int ret; - ret = reset_control_reset(rpc->rstc); if (ret) return ret; @@ -354,7 +355,7 @@ int rpcif_hw_init(struct rpcif *rpcif, bool hyperflash) regmap_write(rpc->regmap, RPCIF_SSLDR, RPCIF_SSLDR_SPNDL(7) | RPCIF_SSLDR_SLNDL(7) | RPCIF_SSLDR_SCKDL(7)); - pm_runtime_put(rpc->dev); + pm_runtime_put(dev); rpc->bus_size = hyperflash ? 2 : 1; @@ -384,10 +385,10 @@ static u8 rpcif_bit_size(u8 buswidth) return buswidth > 4 ? 2 : ilog2(buswidth); } -void rpcif_prepare(struct rpcif *rpcif, const struct rpcif_op *op, u64 *offs, +void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs, size_t *len) { - struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev); + struct rpcif_priv *rpc = dev_get_drvdata(dev); rpc->smcr = 0; rpc->smadr = 0; @@ -472,13 +473,15 @@ void rpcif_prepare(struct rpcif *rpcif, const struct rpcif_op *op, u64 *offs, } EXPORT_SYMBOL(rpcif_prepare); -int rpcif_manual_xfer(struct rpcif *rpcif) +int rpcif_manual_xfer(struct device *dev) { - struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev); + struct rpcif_priv *rpc = dev_get_drvdata(dev); u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 
8 : 4; int ret = 0; - pm_runtime_get_sync(rpc->dev); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; regmap_update_bits(rpc->regmap, RPCIF_PHYCNT, RPCIF_PHYCNT_CAL, RPCIF_PHYCNT_CAL); @@ -588,13 +591,13 @@ int rpcif_manual_xfer(struct rpcif *rpcif) } exit: - pm_runtime_put(rpc->dev); + pm_runtime_put(dev); return ret; err_out: if (reset_control_reset(rpc->rstc)) - dev_err(rpc->dev, "Failed to reset HW\n"); - rpcif_hw_init(rpcif, rpc->bus_size == 2); + dev_err(dev, "Failed to reset HW\n"); + rpcif_hw_init(dev, rpc->bus_size == 2); goto exit; } EXPORT_SYMBOL(rpcif_manual_xfer); @@ -641,16 +644,19 @@ static void memcpy_fromio_readw(void *to, } } -ssize_t rpcif_dirmap_read(struct rpcif *rpcif, u64 offs, size_t len, void *buf) +ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf) { - struct rpcif_priv *rpc = dev_get_drvdata(rpcif->dev); + struct rpcif_priv *rpc = dev_get_drvdata(dev); loff_t from = offs & (rpc->size - 1); size_t size = rpc->size - from; + int ret; if (len > size) len = size; - pm_runtime_get_sync(rpc->dev); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) + return ret; regmap_update_bits(rpc->regmap, RPCIF_CMNCR, RPCIF_CMNCR_MD, 0); regmap_write(rpc->regmap, RPCIF_DRCR, 0); @@ -668,7 +674,7 @@ ssize_t rpcif_dirmap_read(struct rpcif *rpcif, u64 offs, size_t len, void *buf) else memcpy_fromio(buf, rpc->dirmap + from, len); - pm_runtime_put(rpc->dev); + pm_runtime_put(dev); return len; } diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c index d1f01f80dcbd..a68e281b7896 100644 --- a/drivers/memory/tegra/tegra20-emc.c +++ b/drivers/memory/tegra/tegra20-emc.c @@ -477,14 +477,15 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc) ram_code = tegra_read_ram_code(); - for (np = of_find_node_by_name(dev->of_node, "emc-tables"); np; - np = of_find_node_by_name(np, "emc-tables")) { + for_each_child_of_node(dev->of_node, np) { + if (!of_node_name_eq(np, "emc-tables")) + continue; err = of_property_read_u32(np, "nvidia,ram-code", &value); if (err || value != ram_code) { struct device_node *lpddr2_np; bool cfg_mismatches = false; - lpddr2_np = of_find_node_by_name(np, "lpddr2"); + lpddr2_np = of_get_child_by_name(np, "lpddr2"); if (lpddr2_np) { const struct lpddr2_info *info; @@ -521,7 +522,6 @@ tegra_emc_find_node_by_ram_code(struct tegra_emc *emc) } if (cfg_mismatches) { - of_node_put(np); continue; } } diff --git a/drivers/memstick/host/rtsx_usb_ms.c b/drivers/memstick/host/rtsx_usb_ms.c index 29271ad4728a..dec279845a75 100644 --- a/drivers/memstick/host/rtsx_usb_ms.c +++ b/drivers/memstick/host/rtsx_usb_ms.c @@ -813,6 +813,7 @@ static int rtsx_usb_ms_drv_remove(struct platform_device *pdev) host->eject = true; cancel_work_sync(&host->handle_req); + cancel_delayed_work_sync(&host->poll_card); mutex_lock(&host->host_mutex); if (host->req) { diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 34901bcd1ce8..452774cf40cb 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -4234,10 +4234,8 @@ mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num, static void mptsas_reprobe_lun(struct scsi_device *sdev, void *data) { - int rc; - sdev->no_uld_attach = data ? 
1 : 0; - rc = scsi_device_reprobe(sdev); + WARN_ON(scsi_device_reprobe(sdev)); } static void diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c index b79a57b45c1e..458f919de4cb 100644 --- a/drivers/mfd/da9052-spi.c +++ b/drivers/mfd/da9052-spi.c @@ -37,7 +37,7 @@ static int da9052_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, da9052); config = da9052_regmap_config; - config.read_flag_mask = 1; + config.write_flag_mask = 1; config.reg_bits = 7; config.pad_bits = 1; config.val_bits = 8; diff --git a/drivers/mfd/ene-kb3930.c b/drivers/mfd/ene-kb3930.c index 3eff98e26bea..6f8e39fe44ab 100644 --- a/drivers/mfd/ene-kb3930.c +++ b/drivers/mfd/ene-kb3930.c @@ -162,7 +162,7 @@ static int kb3930_probe(struct i2c_client *client) devm_gpiod_get_array_optional(dev, "off", GPIOD_IN); if (IS_ERR(ddata->off_gpios)) return PTR_ERR(ddata->off_gpios); - if (ddata->off_gpios->ndescs < 2) { + if (ddata->off_gpios && ddata->off_gpios->ndescs < 2) { dev_err(dev, "invalid off-gpios property\n"); return -EINVAL; } diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c index 8dac0d41f64f..3aa7857271da 100644 --- a/drivers/mfd/intel_soc_pmic_bxtwc.c +++ b/drivers/mfd/intel_soc_pmic_bxtwc.c @@ -231,16 +231,46 @@ static const struct resource tmu_resources[] = { }; static struct mfd_cell bxt_wc_dev[] = { - { - .name = "bxt_wcove_gpadc", - .num_resources = ARRAY_SIZE(adc_resources), - .resources = adc_resources, - }, { .name = "bxt_wcove_thermal", .num_resources = ARRAY_SIZE(thermal_resources), .resources = thermal_resources, }, + { + .name = "bxt_wcove_gpio", + .num_resources = ARRAY_SIZE(gpio_resources), + .resources = gpio_resources, + }, + { + .name = "bxt_wcove_region", + }, +}; + +static const struct mfd_cell bxt_wc_tmu_dev[] = { + { + .name = "bxt_wcove_tmu", + .num_resources = ARRAY_SIZE(tmu_resources), + .resources = tmu_resources, + }, +}; + +static const struct mfd_cell bxt_wc_bcu_dev[] = { + { + .name = "bxt_wcove_bcu", + .num_resources = ARRAY_SIZE(bcu_resources), + .resources = bcu_resources, + }, +}; + +static const struct mfd_cell bxt_wc_adc_dev[] = { + { + .name = "bxt_wcove_gpadc", + .num_resources = ARRAY_SIZE(adc_resources), + .resources = adc_resources, + }, +}; + +static struct mfd_cell bxt_wc_chgr_dev[] = { { .name = "bxt_wcove_usbc", .num_resources = ARRAY_SIZE(usbc_resources), @@ -251,25 +281,6 @@ static struct mfd_cell bxt_wc_dev[] = { .num_resources = ARRAY_SIZE(charger_resources), .resources = charger_resources, }, - { - .name = "bxt_wcove_bcu", - .num_resources = ARRAY_SIZE(bcu_resources), - .resources = bcu_resources, - }, - { - .name = "bxt_wcove_tmu", - .num_resources = ARRAY_SIZE(tmu_resources), - .resources = tmu_resources, - }, - - { - .name = "bxt_wcove_gpio", - .num_resources = ARRAY_SIZE(gpio_resources), - .resources = gpio_resources, - }, - { - .name = "bxt_wcove_region", - }, }; static int regmap_ipc_byte_reg_read(void *context, unsigned int reg, @@ -426,6 +437,26 @@ static int bxtwc_add_chained_irq_chip(struct intel_soc_pmic *pmic, 0, chip, data); } +static int bxtwc_add_chained_devices(struct intel_soc_pmic *pmic, + const struct mfd_cell *cells, int n_devs, + struct regmap_irq_chip_data *pdata, + int pirq, int irq_flags, + const struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) +{ + struct device *dev = pmic->dev; + struct irq_domain *domain; + int ret; + + ret = bxtwc_add_chained_irq_chip(pmic, pdata, pirq, irq_flags, chip, data); + if (ret) + return dev_err_probe(dev, ret, "Failed to add %s IRQ chip\n", 
chip->name); + + domain = regmap_irq_get_domain(*data); + + return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, cells, n_devs, NULL, 0, domain); +} + static int bxtwc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -467,6 +498,15 @@ static int bxtwc_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "Failed to add IRQ chip\n"); + ret = bxtwc_add_chained_devices(pmic, bxt_wc_tmu_dev, ARRAY_SIZE(bxt_wc_tmu_dev), + pmic->irq_chip_data, + BXTWC_TMU_LVL1_IRQ, + IRQF_ONESHOT, + &bxtwc_regmap_irq_chip_tmu, + &pmic->irq_chip_data_tmu); + if (ret) + return ret; + ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, BXTWC_PWRBTN_LVL1_IRQ, IRQF_ONESHOT, @@ -475,40 +515,32 @@ static int bxtwc_probe(struct platform_device *pdev) if (ret) return dev_err_probe(dev, ret, "Failed to add PWRBTN IRQ chip\n"); - ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, - BXTWC_TMU_LVL1_IRQ, - IRQF_ONESHOT, - &bxtwc_regmap_irq_chip_tmu, - &pmic->irq_chip_data_tmu); + ret = bxtwc_add_chained_devices(pmic, bxt_wc_bcu_dev, ARRAY_SIZE(bxt_wc_bcu_dev), + pmic->irq_chip_data, + BXTWC_BCU_LVL1_IRQ, + IRQF_ONESHOT, + &bxtwc_regmap_irq_chip_bcu, + &pmic->irq_chip_data_bcu); if (ret) - return dev_err_probe(dev, ret, "Failed to add TMU IRQ chip\n"); + return ret; - /* Add chained IRQ handler for BCU IRQs */ - ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, - BXTWC_BCU_LVL1_IRQ, - IRQF_ONESHOT, - &bxtwc_regmap_irq_chip_bcu, - &pmic->irq_chip_data_bcu); + ret = bxtwc_add_chained_devices(pmic, bxt_wc_adc_dev, ARRAY_SIZE(bxt_wc_adc_dev), + pmic->irq_chip_data, + BXTWC_ADC_LVL1_IRQ, + IRQF_ONESHOT, + &bxtwc_regmap_irq_chip_adc, + &pmic->irq_chip_data_adc); if (ret) - return dev_err_probe(dev, ret, "Failed to add BUC IRQ chip\n"); + return ret; - /* Add chained IRQ handler for ADC IRQs */ - ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, - BXTWC_ADC_LVL1_IRQ, - IRQF_ONESHOT, - &bxtwc_regmap_irq_chip_adc, - &pmic->irq_chip_data_adc); + ret = bxtwc_add_chained_devices(pmic, bxt_wc_chgr_dev, ARRAY_SIZE(bxt_wc_chgr_dev), + pmic->irq_chip_data, + BXTWC_CHGR_LVL1_IRQ, + IRQF_ONESHOT, + &bxtwc_regmap_irq_chip_chgr, + &pmic->irq_chip_data_chgr); if (ret) - return dev_err_probe(dev, ret, "Failed to add ADC IRQ chip\n"); - - /* Add chained IRQ handler for CHGR IRQs */ - ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, - BXTWC_CHGR_LVL1_IRQ, - IRQF_ONESHOT, - &bxtwc_regmap_irq_chip_chgr, - &pmic->irq_chip_data_chgr); - if (ret) - return dev_err_probe(dev, ret, "Failed to add CHGR IRQ chip\n"); + return ret; /* Add chained IRQ handler for CRIT IRQs */ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data, diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c index 7b1c597b6879..03367fcac42a 100644 --- a/drivers/mfd/lpc_ich.c +++ b/drivers/mfd/lpc_ich.c @@ -756,8 +756,9 @@ static const struct pci_device_id lpc_ich_ids[] = { { PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME}, { PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9}, { PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M}, - { PCI_VDEVICE(INTEL, 0x3197), LPC_GLK}, { PCI_VDEVICE(INTEL, 0x2b9c), LPC_COUGARMOUNTAIN}, + { PCI_VDEVICE(INTEL, 0x3197), LPC_GLK}, + { PCI_VDEVICE(INTEL, 0x31e8), LPC_GLK}, { PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO}, { PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R}, { PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10}, diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c index df095e91e266..78bafeda5e47 100644 --- a/drivers/mfd/rt5033.c +++ b/drivers/mfd/rt5033.c @@ -82,8 +82,8 @@ static int 
rt5033_i2c_probe(struct i2c_client *i2c, } dev_info(&i2c->dev, "Device found Device ID: %04x\n", dev_id); - ret = regmap_add_irq_chip(rt5033->regmap, rt5033->irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + ret = devm_regmap_add_irq_chip(rt5033->dev, rt5033->regmap, + rt5033->irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 0, &rt5033_irq_chip, &rt5033->irq_data); if (ret) { dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n", diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 3ac4508a6742..78dcbf8e2c15 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c @@ -920,7 +920,7 @@ static void sm501_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); void __iomem *regs = smchip->regbase; unsigned long save; unsigned long val; @@ -946,7 +946,7 @@ static int sm501_gpio_input(struct gpio_chip *chip, unsigned offset) struct sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; void __iomem *regs = smchip->regbase; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); unsigned long save; unsigned long ddr; @@ -971,7 +971,7 @@ static int sm501_gpio_output(struct gpio_chip *chip, { struct sm501_gpio_chip *smchip = gpiochip_get_data(chip); struct sm501_gpio *smgpio = smchip->ourgpio; - unsigned long bit = 1 << offset; + unsigned long bit = BIT(offset); void __iomem *regs = smchip->regbase; unsigned long save; unsigned long val; diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index ecfe15122091..8302cd63a73d 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -8,12 +8,14 @@ * Author: Dong Aisheng */ +#include #include #include #include #include #include #include +#include #include #include #include @@ -25,7 +27,7 @@ static struct platform_driver syscon_driver; -static DEFINE_SPINLOCK(syscon_list_slock); +static DEFINE_MUTEX(syscon_list_lock); static LIST_HEAD(syscon_list); struct syscon { @@ -43,7 +45,6 @@ static const struct regmap_config syscon_regmap_config = { static struct syscon *of_syscon_register(struct device_node *np, bool check_clk) { struct clk *clk; - struct syscon *syscon; struct regmap *regmap; void __iomem *base; u32 reg_io_width; @@ -51,20 +52,18 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk) struct regmap_config syscon_config = syscon_regmap_config; struct resource res; - syscon = kzalloc(sizeof(*syscon), GFP_KERNEL); + WARN_ON(!mutex_is_locked(&syscon_list_lock)); + + struct syscon *syscon __free(kfree) = kzalloc(sizeof(*syscon), GFP_KERNEL); if (!syscon) return ERR_PTR(-ENOMEM); - if (of_address_to_resource(np, 0, &res)) { - ret = -ENOMEM; - goto err_map; - } + if (of_address_to_resource(np, 0, &res)) + return ERR_PTR(-ENOMEM); base = of_iomap(np, 0); - if (!base) { - ret = -ENOMEM; - goto err_map; - } + if (!base) + return ERR_PTR(-ENOMEM); /* Parse the device's DT node for an endianness specification */ if (of_property_read_bool(np, "big-endian")) @@ -135,11 +134,9 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk) syscon->regmap = regmap; syscon->np = np; - spin_lock(&syscon_list_slock); list_add_tail(&syscon->list, &syscon_list); - spin_unlock(&syscon_list_slock); - return syscon; + return_ptr(syscon); err_attach: if (!IS_ERR(clk)) @@ -148,8 +145,6 @@ err_clk: regmap_exit(regmap); err_regmap: iounmap(base); -err_map: - kfree(syscon); return 
ERR_PTR(ret); } @@ -158,7 +153,7 @@ static struct regmap *device_node_get_regmap(struct device_node *np, { struct syscon *entry, *syscon = NULL; - spin_lock(&syscon_list_slock); + mutex_lock(&syscon_list_lock); list_for_each_entry(entry, &syscon_list, list) if (entry->np == np) { @@ -166,17 +161,65 @@ static struct regmap *device_node_get_regmap(struct device_node *np, break; } - spin_unlock(&syscon_list_slock); - if (!syscon) syscon = of_syscon_register(np, check_clk); + mutex_unlock(&syscon_list_lock); + if (IS_ERR(syscon)) return ERR_CAST(syscon); return syscon->regmap; } +/** + * of_syscon_register_regmap() - Register regmap for specified device node + * @np: Device tree node + * @regmap: Pointer to regmap object + * + * Register an externally created regmap object with syscon for the specified + * device tree node. This regmap will then be returned to client drivers using + * the syscon_regmap_lookup_by_phandle() API. + * + * Return: 0 on success, negative error code on failure. + */ +int of_syscon_register_regmap(struct device_node *np, struct regmap *regmap) +{ + struct syscon *entry, *syscon = NULL; + int ret; + + if (!np || !regmap) + return -EINVAL; + + syscon = kzalloc(sizeof(*syscon), GFP_KERNEL); + if (!syscon) + return -ENOMEM; + + /* check if syscon entry already exists */ + mutex_lock(&syscon_list_lock); + + list_for_each_entry(entry, &syscon_list, list) + if (entry->np == np) { + ret = -EEXIST; + goto err_unlock; + } + + syscon->regmap = regmap; + syscon->np = np; + + /* register the regmap in syscon list */ + list_add_tail(&syscon->list, &syscon_list); + mutex_unlock(&syscon_list_lock); + + return 0; + +err_unlock: + mutex_unlock(&syscon_list_lock); + kfree(syscon); + return ret; +} +EXPORT_SYMBOL_GPL(of_syscon_register_regmap); + struct regmap *device_node_to_regmap(struct device_node *np) { return device_node_get_regmap(np, false); diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c index c2afa2e69f42..35ddffb96ba1 100644 --- a/drivers/mfd/tps65010.c +++ b/drivers/mfd/tps65010.c @@ -548,17 +548,13 @@ static int tps65010_probe(struct i2c_client *client, */ if (client->irq > 0) { status = request_irq(client->irq, tps65010_irq, - IRQF_TRIGGER_FALLING, DRIVER_NAME, tps); + IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN, + DRIVER_NAME, tps); if (status < 0) { dev_dbg(&client->dev, "can't get IRQ %d, err %d\n", client->irq, status); return status; } - /* annoying race here, ideally we'd have an option - * to claim the irq now and enable it later. - * FIXME genirq IRQF_NOAUTOEN now solves that ... 
-		 */
-		disable_irq(client->irq);
 		set_bit(FLAG_IRQ_ENABLE, &tps->flags);
 	} else
 		dev_warn(&client->dev, "IRQ not configured!\n");
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index e2100cc42ce8..668609d22fe1 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -1148,7 +1148,7 @@ static int apds990x_probe(struct i2c_client *client,
 		err = chip->pdata->setup_resources();
 		if (err) {
 			err = -EINVAL;
-			goto fail3;
+			goto fail4;
 		}
 	}
@@ -1156,7 +1156,7 @@ static int apds990x_probe(struct i2c_client *client,
 				apds990x_attribute_group);
 	if (err < 0) {
 		dev_err(&chip->client->dev, "Sysfs registration failed\n");
-		goto fail4;
+		goto fail5;
 	}
 	err = request_threaded_irq(client->irq, NULL,
@@ -1167,15 +1167,17 @@ static int apds990x_probe(struct i2c_client *client,
 	if (err) {
 		dev_err(&client->dev, "could not get IRQ %d\n", client->irq);
-		goto fail5;
+		goto fail6;
 	}
 	return err;
-fail5:
+fail6:
 	sysfs_remove_group(&chip->client->dev.kobj, &apds990x_attribute_group[0]);
-fail4:
+fail5:
 	if (chip->pdata && chip->pdata->release_resources)
 		chip->pdata->release_resources();
+fail4:
+	pm_runtime_disable(&client->dev);
 fail3:
 	regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
 fail2:
diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
index 4eddc5ba1af9..dfaedc0e350d 100644
--- a/drivers/misc/eeprom/digsy_mtc_eeprom.c
+++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
@@ -60,7 +60,7 @@ static struct platform_device digsy_mtc_eeprom = {
 };
 static struct gpiod_lookup_table eeprom_spi_gpiod_table = {
-	.dev_id = "spi_gpio",
+	.dev_id = "spi_gpio.1",
 	.table = {
 		GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CLK, "sck", GPIO_ACTIVE_HIGH),
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 9627294fe3e9..4c9827fe9217 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -186,6 +186,11 @@ void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word,
 	eeprom_93cx6_write_bits(eeprom, command, PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+	if (has_quirk_extra_read_cycle(eeprom)) {
+		eeprom_93cx6_pulse_high(eeprom);
+		eeprom_93cx6_pulse_low(eeprom);
+	}
+
 	/*
 	 * Read the requested 16 bits.
 	 */
@@ -252,6 +257,11 @@ void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom, const u8 byte,
 	eeprom_93cx6_write_bits(eeprom, command, PCI_EEPROM_WIDTH_OPCODE + eeprom->width + 1);
+	if (has_quirk_extra_read_cycle(eeprom)) {
+		eeprom_93cx6_pulse_high(eeprom);
+		eeprom_93cx6_pulse_low(eeprom);
+	}
+
 	/*
 	 * Read the requested 8 bits.
 	 */
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 6c94364019c8..0722de355e6e 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -934,7 +934,7 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
 			mmap_read_lock(current->mm);
 			vma = find_vma(current->mm, ctx->args[i].ptr);
 			if (vma)
-				pages[i].addr += ctx->args[i].ptr -
+				pages[i].addr += (ctx->args[i].ptr & PAGE_MASK) -
 					vma->vm_start;
 			mmap_read_unlock(current->mm);
@@ -961,8 +961,8 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
 					(pkt_size - rlen);
 		pages[i].addr = pages[i].addr & PAGE_MASK;
-		pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
-		pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
+		pg_start = (rpra[i].buf.pv & PAGE_MASK) >> PAGE_SHIFT;
+		pg_end = ((rpra[i].buf.pv + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
 		pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
 		args = args + mlen;
 		rlen -= mlen;
@@ -2119,7 +2119,7 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
 		err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
 		if (err)
-			goto fdev_error;
+			goto populate_error;
 		break;
 	default:
 		err = -EINVAL;
diff --git a/drivers/misc/lkdtm/perms.c b/drivers/misc/lkdtm/perms.c
index b93404d65650..e82d9543a0c5 100644
--- a/drivers/misc/lkdtm/perms.c
+++ b/drivers/misc/lkdtm/perms.c
@@ -28,6 +28,13 @@ static const unsigned long rodata = 0xAA55AA55;
 /* This is marked __ro_after_init, so it should ultimately be .rodata. */
 static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
+/*
+ * This is a pointer to do_nothing() which is initialized at runtime rather
+ * than build time to avoid objtool IBT validation warnings caused by an
+ * inlined unrolled memcpy() in execute_location().
+ */
+static void __ro_after_init *do_nothing_ptr;
+
 /*
  * This just returns to the caller. It is designed to be copied into
  * non-executable memory regions.
@@ -65,13 +72,12 @@ static noinline void execute_location(void *dst, bool write) { void (*func)(void); func_desc_t fdesc; - void *do_nothing_text = dereference_function_descriptor(do_nothing); - pr_info("attempting ok execution at %px\n", do_nothing_text); + pr_info("attempting ok execution at %px\n", do_nothing_ptr); do_nothing(); if (write == CODE_WRITE) { - memcpy(dst, do_nothing_text, EXEC_SIZE); + memcpy(dst, do_nothing_ptr, EXEC_SIZE); flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE); } @@ -267,6 +273,8 @@ static void lkdtm_ACCESS_NULL(void) void __init lkdtm_perms_init(void) { + do_nothing_ptr = dereference_function_descriptor(do_nothing); + /* Make sure we can write to __ro_after_init values during __init */ ro_after_init |= 0xAA; } diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c index 3389803cb281..e9f40abf21d2 100644 --- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c +++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_gpio.c @@ -37,6 +37,7 @@ struct pci1xxxx_gpio { struct auxiliary_device *aux_dev; void __iomem *reg_base; + raw_spinlock_t wa_lock; struct gpio_chip gpio; spinlock_t lock; int irq_base; @@ -148,7 +149,7 @@ static int pci1xxxx_gpio_set_config(struct gpio_chip *gpio, unsigned int offset, pci1xxx_assign_bit(priv->reg_base, OPENDRAIN_OFFSET(offset), (offset % 32), true); break; default: - ret = -EOPNOTSUPP; + ret = -ENOTSUPP; break; } spin_unlock_irqrestore(&priv->lock, flags); @@ -164,7 +165,7 @@ static void pci1xxxx_gpio_irq_ack(struct irq_data *data) unsigned long flags; spin_lock_irqsave(&priv->lock, flags); - pci1xxx_assign_bit(priv->reg_base, INTR_STAT_OFFSET(gpio), (gpio % 32), true); + writel(BIT(gpio % 32), priv->reg_base + INTR_STAT_OFFSET(gpio)); spin_unlock_irqrestore(&priv->lock, flags); } @@ -250,6 +251,7 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id) struct pci1xxxx_gpio *priv = dev_id; struct gpio_chip *gc = &priv->gpio; unsigned long int_status = 0; + unsigned long wa_flags; unsigned long flags; u8 pincount; int bit; @@ -273,7 +275,9 @@ static irqreturn_t pci1xxxx_gpio_irq_handler(int irq, void *dev_id) writel(BIT(bit), priv->reg_base + INTR_STATUS_OFFSET(gpiobank)); spin_unlock_irqrestore(&priv->lock, flags); irq = irq_find_mapping(gc->irq.domain, (bit + (gpiobank * 32))); + raw_spin_lock_irqsave(&priv->wa_lock, wa_flags); generic_handle_irq(irq); + raw_spin_unlock_irqrestore(&priv->wa_lock, wa_flags); } } spin_lock_irqsave(&priv->lock, flags); diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index d3c03d4edbef..4adfa5af162f 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -117,6 +117,9 @@ #define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */ +#define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */ +#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index dd4d92fa44c6..17648443a419 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -124,6 +124,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 18059a12d4e1..7b5fc0a50231 100644 --- a/drivers/misc/pci_endpoint_test.c +++ 
b/drivers/misc/pci_endpoint_test.c @@ -245,7 +245,7 @@ static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test) return true; fail: - switch (irq_type) { + switch (test->irq_type) { case IRQ_TYPE_LEGACY: dev_err(dev, "Failed to request IRQ %d for Legacy\n", pci_irq_vector(pdev, i)); @@ -262,6 +262,9 @@ fail: break; } + test->num_irqs = i; + pci_endpoint_test_release_irq(test); + return false; } @@ -714,6 +717,7 @@ static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test, if (!pci_endpoint_test_request_irq(test)) goto err; + irq_type = test->irq_type; return true; err: diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index d8762fa3d5cd..819d8a843501 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -149,6 +149,8 @@ static void mmc_bus_shutdown(struct device *dev) if (dev->driver && drv->shutdown) drv->shutdown(card); + __mmc_stop_host(host); + if (host->bus_ops->shutdown) { ret = host->bus_ops->shutdown(host); if (ret) diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index f09c3f34805e..21005e26efa2 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h @@ -82,6 +82,7 @@ struct mmc_fixup { #define CID_MANFID_SANDISK_SD 0x3 #define CID_MANFID_ATP 0x9 #define CID_MANFID_TOSHIBA 0x11 +#define CID_MANFID_GIGASTONE 0x12 #define CID_MANFID_MICRON 0x13 #define CID_MANFID_SAMSUNG 0x15 #define CID_MANFID_APACER 0x27 @@ -285,4 +286,10 @@ static inline int mmc_card_broken_cache_flush(const struct mmc_card *c) { return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH; } + +static inline int mmc_card_broken_sd_poweroff_notify(const struct mmc_card *c) +{ + return c->quirks & MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY; +} + #endif diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index f0c3e39c6e24..4e7ea76ca665 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2293,6 +2293,9 @@ void mmc_start_host(struct mmc_host *host) void __mmc_stop_host(struct mmc_host *host) { + if (host->rescan_disable) + return; + if (host->slot.cd_irq >= 0) { mmc_gpio_set_cd_wake(host, false); disable_irq(host->slot.cd_irq); diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index 6f2d5d84df04..bb20aa286e70 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -25,6 +25,15 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = { 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd, MMC_QUIRK_BROKEN_SD_CACHE, EXT_CSD_REV_ANY), + /* + * GIGASTONE Gaming Plus microSD cards manufactured on 02/2022 never + * clear Flush Cache bit and set Poweroff Notification Ready bit. + */ + _FIXUP_EXT("ASTC", CID_MANFID_GIGASTONE, 0x3456, 2022, 2, + 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd, + MMC_QUIRK_BROKEN_SD_CACHE | MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY, + EXT_CSD_REV_ANY), + END_FIXUP }; diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index b2919fef6a79..ddaf3dd3893c 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -1118,7 +1118,7 @@ static int sd_parse_ext_reg_power(struct mmc_card *card, u8 fno, u8 page, card->ext_power.rev = reg_buf[0] & 0xf; /* Power Off Notification support at bit 4. */ - if (reg_buf[1] & BIT(4)) + if ((reg_buf[1] & BIT(4)) && !mmc_card_broken_sd_poweroff_notify(card)) card->ext_power.feature_support |= SD_EXT_POWER_OFF_NOTIFY; /* Power Sustenance support at bit 5. 
*/ diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 5914516df2f7..cb87e8273779 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -458,6 +458,8 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card) if (mmc_card_sd_combo(card)) max_dtr = min(max_dtr, mmc_sd_get_max_clock(card)); + max_dtr = min_not_zero(max_dtr, card->quirk_max_rate); + return max_dtr; } diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index dd18440a90c5..914c0e84b5cc 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2506,8 +2506,10 @@ static int atmci_probe(struct platform_device *pdev) /* Get MCI capabilities and set operations according to it */ atmci_get_cap(host); ret = atmci_configure_dma(host); - if (ret == -EPROBE_DEFER) + if (ret == -EPROBE_DEFER) { + clk_disable_unprepare(host->mck); goto err_dma_probe_defer; + } if (ret == 0) { host->prepare_data = &atmci_prepare_data_dma; host->submit_data = &atmci_submit_data_dma; diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c index 9f20ac524c8b..2a5c3c822f6a 100644 --- a/drivers/mmc/host/dw_mmc-exynos.c +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -28,6 +28,8 @@ enum dw_mci_exynos_type { DW_MCI_TYPE_EXYNOS5420_SMU, DW_MCI_TYPE_EXYNOS7, DW_MCI_TYPE_EXYNOS7_SMU, + DW_MCI_TYPE_EXYNOS7870, + DW_MCI_TYPE_EXYNOS7870_SMU, DW_MCI_TYPE_ARTPEC8, }; @@ -70,6 +72,12 @@ static struct dw_mci_exynos_compatible { }, { .compatible = "samsung,exynos7-dw-mshc-smu", .ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU, + }, { + .compatible = "samsung,exynos7870-dw-mshc", + .ctrl_type = DW_MCI_TYPE_EXYNOS7870, + }, { + .compatible = "samsung,exynos7870-dw-mshc-smu", + .ctrl_type = DW_MCI_TYPE_EXYNOS7870_SMU, }, { .compatible = "axis,artpec8-dw-mshc", .ctrl_type = DW_MCI_TYPE_ARTPEC8, @@ -86,6 +94,8 @@ static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host) return EXYNOS4210_FIXED_CIU_CLK_DIV; else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1; else @@ -101,7 +111,8 @@ static void dw_mci_exynos_config_smu(struct dw_mci *host) * set for non-ecryption mode at this time. 
*/ if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU || - priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) { + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) { mci_writel(host, MPSBEGIN0, 0); mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX); mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT | @@ -127,6 +138,12 @@ static int dw_mci_exynos_priv_init(struct dw_mci *host) DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl); } + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU) { + /* Quirk needed for certain Exynos SoCs */ + host->quirks |= DW_MMC_QUIRK_FIFO64_32; + } + if (priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) { /* Quirk needed for the ARTPEC-8 SoC */ host->quirks |= DW_MMC_QUIRK_EXTENDED_TMOUT; @@ -144,6 +161,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing) if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) clksel = mci_readl(host, CLKSEL64); else @@ -153,6 +172,8 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing) if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) mci_writel(host, CLKSEL64, clksel); else @@ -223,6 +244,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev) if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) clksel = mci_readl(host, CLKSEL64); else @@ -231,6 +254,8 @@ static int dw_mci_exynos_resume_noirq(struct device *dev) if (clksel & SDMMC_CLKSEL_WAKEUP_INT) { if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) mci_writel(host, CLKSEL64, clksel); else @@ -410,6 +435,8 @@ static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host) if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64)); else @@ -423,6 +450,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) clksel = mci_readl(host, CLKSEL64); else @@ -430,6 +459,8 @@ static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample); if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) mci_writel(host, CLKSEL64, clksel); else @@ -444,6 +475,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host) if (priv->ctrl_type == 
DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) clksel = mci_readl(host, CLKSEL64); else @@ -454,6 +487,8 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host) if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 || priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870 || + priv->ctrl_type == DW_MCI_TYPE_EXYNOS7870_SMU || priv->ctrl_type == DW_MCI_TYPE_ARTPEC8) mci_writel(host, CLKSEL64, clksel); else @@ -633,6 +668,10 @@ static const struct of_device_id dw_mci_exynos_match[] = { .data = &exynos_drv_data, }, { .compatible = "samsung,exynos7-dw-mshc-smu", .data = &exynos_drv_data, }, + { .compatible = "samsung,exynos7870-dw-mshc", + .data = &exynos_drv_data, }, + { .compatible = "samsung,exynos7870-dw-mshc-smu", + .data = &exynos_drv_data, }, { .compatible = "axis,artpec8-dw-mshc", .data = &artpec_drv_data, }, {}, diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index d3c696808cb3..ca68e43b5457 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -2703,6 +2703,91 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) } } +static void dw_mci_push_data64_32(struct dw_mci *host, void *buf, int cnt) +{ + struct mmc_data *data = host->data; + int init_cnt = cnt; + + /* try and push anything in the part_buf */ + if (unlikely(host->part_buf_count)) { + int len = dw_mci_push_part_bytes(host, buf, cnt); + + buf += len; + cnt -= len; + + if (host->part_buf_count == 8) { + mci_fifo_l_writeq(host->fifo_reg, host->part_buf); + host->part_buf_count = 0; + } + } +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (unlikely((unsigned long)buf & 0x7)) { + while (cnt >= 8) { + u64 aligned_buf[16]; + int len = min(cnt & -8, (int)sizeof(aligned_buf)); + int items = len >> 3; + int i; + /* memcpy from input buffer into aligned buffer */ + memcpy(aligned_buf, buf, len); + buf += len; + cnt -= len; + /* push data from aligned buffer into fifo */ + for (i = 0; i < items; ++i) + mci_fifo_l_writeq(host->fifo_reg, aligned_buf[i]); + } + } else +#endif + { + u64 *pdata = buf; + + for (; cnt >= 8; cnt -= 8) + mci_fifo_l_writeq(host->fifo_reg, *pdata++); + buf = pdata; + } + /* put anything remaining in the part_buf */ + if (cnt) { + dw_mci_set_part_bytes(host, buf, cnt); + /* Push data if we have reached the expected data length */ + if ((data->bytes_xfered + init_cnt) == + (data->blksz * data->blocks)) + mci_fifo_l_writeq(host->fifo_reg, host->part_buf); + } +} + +static void dw_mci_pull_data64_32(struct dw_mci *host, void *buf, int cnt) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + if (unlikely((unsigned long)buf & 0x7)) { + while (cnt >= 8) { + /* pull data from fifo into aligned buffer */ + u64 aligned_buf[16]; + int len = min(cnt & -8, (int)sizeof(aligned_buf)); + int items = len >> 3; + int i; + + for (i = 0; i < items; ++i) + aligned_buf[i] = mci_fifo_l_readq(host->fifo_reg); + + /* memcpy from aligned buffer into output buffer */ + memcpy(buf, aligned_buf, len); + buf += len; + cnt -= len; + } + } else +#endif + { + u64 *pdata = buf; + + for (; cnt >= 8; cnt -= 8) + *pdata++ = mci_fifo_l_readq(host->fifo_reg); + buf = pdata; + } + if (cnt) { + host->part_buf = mci_fifo_l_readq(host->fifo_reg); + dw_mci_pull_final_bytes(host, buf, cnt); + } +} + static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) { int len; @@ -3583,8 +3668,13 
@@ int dw_mci_probe(struct dw_mci *host) width = 16; host->data_shift = 1; } else if (i == 2) { - host->push_data = dw_mci_push_data64; - host->pull_data = dw_mci_pull_data64; + if ((host->quirks & DW_MMC_QUIRK_FIFO64_32)) { + host->push_data = dw_mci_push_data64_32; + host->pull_data = dw_mci_pull_data64_32; + } else { + host->push_data = dw_mci_push_data64; + host->pull_data = dw_mci_pull_data64; + } width = 64; host->data_shift = 3; } else { diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 017d8992c5b9..cf47d5dec1e1 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -286,6 +286,8 @@ struct dw_mci_board { /* Support for longer data read timeout */ #define DW_MMC_QUIRK_EXTENDED_TMOUT BIT(0) +/* Force 32-bit access to the FIFO */ +#define DW_MMC_QUIRK_FIFO64_32 BIT(1) #define DW_MMC_240A 0x240a #define DW_MMC_280A 0x280a @@ -477,6 +479,31 @@ struct dw_mci_board { #define mci_fifo_writel(__value, __reg) __raw_writel(__reg, __value) #define mci_fifo_writeq(__value, __reg) __raw_writeq(__reg, __value) +/* + * Some dw_mmc devices have 64-bit FIFOs, but expect them to be + * accessed using two 32-bit accesses. If such controller is used + * with a 64-bit kernel, this has to be done explicitly. + */ +static inline u64 mci_fifo_l_readq(void __iomem *addr) +{ + u64 ans; + u32 proxy[2]; + + proxy[0] = mci_fifo_readl(addr); + proxy[1] = mci_fifo_readl(addr + 4); + memcpy(&ans, proxy, 8); + return ans; +} + +static inline void mci_fifo_l_writeq(void __iomem *addr, u64 value) +{ + u32 proxy[2]; + + memcpy(proxy, &value, 8); + mci_fifo_writel(addr, proxy[0]); + mci_fifo_writel(addr + 4, proxy[1]); +} + /* Register access macros */ #define mci_readl(dev, reg) \ readl_relaxed((dev)->regs + SDMMC_##reg) diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 2a99ffb61f8c..30b93dc938f1 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -223,10 +223,6 @@ static int mmc_spi_response_get(struct mmc_spi_host *host, u8 leftover = 0; unsigned short rotator; int i; - char tag[32]; - - snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s", - cmd->opcode, maptype(cmd)); /* Except for data block reads, the whole response will already * be stored in the scratch buffer. It's somewhere after the @@ -379,8 +375,9 @@ checkstatus: } if (value < 0) - dev_dbg(&host->spi->dev, "%s: resp %04x %08x\n", - tag, cmd->resp[0], cmd->resp[1]); + dev_dbg(&host->spi->dev, + " ... 
CMD%d response SPI_%s: resp %04x %08x\n", + cmd->opcode, maptype(cmd), cmd->resp[0], cmd->resp[1]); /* disable chipselect on errors and some success cases */ if (value >= 0 && cs_on) diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index ba18e9fa64b1..04f6b8037e59 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -262,6 +262,7 @@ #define MSDC_PAD_TUNE_CMD_SEL BIT(21) /* RW */ #define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */ +#define PAD_DS_TUNE_DLY2_SEL BIT(1) /* RW */ #define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */ #define PAD_DS_TUNE_DLY2 GENMASK(11, 7) /* RW */ #define PAD_DS_TUNE_DLY3 GENMASK(16, 12) /* RW */ @@ -307,6 +308,7 @@ /* EMMC50_PAD_DS_TUNE mask */ #define PAD_DS_DLY_SEL BIT(16) /* RW */ +#define PAD_DS_DLY2_SEL BIT(15) /* RW */ #define PAD_DS_DLY1 GENMASK(14, 10) /* RW */ #define PAD_DS_DLY3 GENMASK(4, 0) /* RW */ @@ -2293,13 +2295,23 @@ tune_done: static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) { struct msdc_host *host = mmc_priv(mmc); + host->hs400_mode = true; - if (host->top_base) - writel(host->hs400_ds_delay, - host->top_base + EMMC50_PAD_DS_TUNE); - else - writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE); + if (host->top_base) { + if (host->hs400_ds_dly3) + sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE, + PAD_DS_DLY3, host->hs400_ds_dly3); + if (host->hs400_ds_delay) + writel(host->hs400_ds_delay, + host->top_base + EMMC50_PAD_DS_TUNE); + } else { + if (host->hs400_ds_dly3) + sdr_set_field(host->base + PAD_DS_TUNE, + PAD_DS_TUNE_DLY3, host->hs400_ds_dly3); + if (host->hs400_ds_delay) + writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE); + } /* hs400 mode must set it to 0 */ sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS); /* to improve read performance, set outstanding to 2 */ @@ -2319,14 +2331,11 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card if (host->top_base) { sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE, PAD_DS_DLY_SEL); - if (host->hs400_ds_dly3) - sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE, - PAD_DS_DLY3, host->hs400_ds_dly3); + sdr_clr_bits(host->top_base + EMMC50_PAD_DS_TUNE, + PAD_DS_DLY2_SEL); } else { sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL); - if (host->hs400_ds_dly3) - sdr_set_field(host->base + PAD_DS_TUNE, - PAD_DS_TUNE_DLY3, host->hs400_ds_dly3); + sdr_clr_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY2_SEL); } host->hs400_tuning = true; @@ -2755,7 +2764,7 @@ static int msdc_drv_probe(struct platform_device *pdev) ret = msdc_ungate_clock(host); if (ret) { dev_err(&pdev->dev, "Cannot ungate clocks!\n"); - goto release_mem; + goto release_clk; } msdc_init_hw(host); @@ -2765,14 +2774,14 @@ static int msdc_drv_probe(struct platform_device *pdev) GFP_KERNEL); if (!host->cq_host) { ret = -ENOMEM; - goto host_free; + goto release; } host->cq_host->caps |= CQHCI_TASK_DESC_SZ_128; host->cq_host->mmio = host->base + 0x800; host->cq_host->ops = &msdc_cmdq_ops; ret = cqhci_init(host->cq_host, mmc, true); if (ret) - goto host_free; + goto release; mmc->max_segs = 128; /* cqhci 16bit length */ /* 0 size, means 65536 so we don't have to -1 here */ @@ -2797,9 +2806,10 @@ static int msdc_drv_probe(struct platform_device *pdev) end: pm_runtime_disable(host->dev); release: - platform_set_drvdata(pdev, NULL); msdc_deinit_hw(host); +release_clk: msdc_gate_clock(host); + platform_set_drvdata(pdev, NULL); release_mem: if (host->dma.gpd) dma_free_coherent(&pdev->dev, diff --git 
a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 7572c5714b46..d0a15645a0b8 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -1109,26 +1109,26 @@ int renesas_sdhi_probe(struct platform_device *pdev, num_irqs = platform_irq_count(pdev); if (num_irqs < 0) { ret = num_irqs; - goto eirq; + goto edisclk; } /* There must be at least one IRQ source */ if (!num_irqs) { ret = -ENXIO; - goto eirq; + goto edisclk; } for (i = 0; i < num_irqs; i++) { irq = platform_get_irq(pdev, i); if (irq < 0) { ret = irq; - goto eirq; + goto edisclk; } ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0, dev_name(&pdev->dev), host); if (ret) - goto eirq; + goto edisclk; } ret = tmio_mmc_host_probe(host); @@ -1140,8 +1140,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, return ret; -eirq: - tmio_mmc_host_remove(host); edisclk: renesas_sdhi_clk_disable(host); efree: diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index 5b650d5dd0ae..5a7ee8a390b0 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -396,8 +396,15 @@ static int sdhci_brcmstb_suspend(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; clk_disable_unprepare(priv->base_clk); + if (host->mmc->caps2 & MMC_CAP2_CQE) { + ret = cqhci_suspend(host->mmc); + if (ret) + return ret; + } + return sdhci_pltfm_suspend(dev); } @@ -422,6 +429,9 @@ static int sdhci_brcmstb_resume(struct device *dev) ret = clk_set_rate(priv->base_clk, priv->base_freq_hz); } + if (host->mmc->caps2 & MMC_CAP2_CQE) + ret = cqhci_resume(host->mmc); + return ret; } #endif diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 28bd562c439e..c8488b8e2073 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -132,9 +132,18 @@ /* Timeout value to avoid infinite waiting for pwr_irq */ #define MSM_PWR_IRQ_TIMEOUT_MS 5000 +/* Max load for eMMC Vdd supply */ +#define MMC_VMMC_MAX_LOAD_UA 570000 + /* Max load for eMMC Vdd-io supply */ #define MMC_VQMMC_MAX_LOAD_UA 325000 +/* Max load for SD Vdd supply */ +#define SD_VMMC_MAX_LOAD_UA 800000 + +/* Max load for SD Vdd-io supply */ +#define SD_VQMMC_MAX_LOAD_UA 22000 + #define msm_host_readl(msm_host, host, offset) \ msm_host->var_ops->msm_readl_relaxed(host, offset) @@ -1399,11 +1408,48 @@ static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level) return ret; } -static int sdhci_msm_set_vmmc(struct mmc_host *mmc) +static void msm_config_vmmc_regulator(struct mmc_host *mmc, bool hpm) +{ + int load; + + if (!hpm) + load = 0; + else if (!mmc->card) + load = max(MMC_VMMC_MAX_LOAD_UA, SD_VMMC_MAX_LOAD_UA); + else if (mmc_card_mmc(mmc->card)) + load = MMC_VMMC_MAX_LOAD_UA; + else if (mmc_card_sd(mmc->card)) + load = SD_VMMC_MAX_LOAD_UA; + else + return; + + regulator_set_load(mmc->supply.vmmc, load); +} + +static void msm_config_vqmmc_regulator(struct mmc_host *mmc, bool hpm) +{ + int load; + + if (!hpm) + load = 0; + else if (!mmc->card) + load = max(MMC_VQMMC_MAX_LOAD_UA, SD_VQMMC_MAX_LOAD_UA); + else if (mmc_card_sd(mmc->card)) + load = SD_VQMMC_MAX_LOAD_UA; + else + return; + + regulator_set_load(mmc->supply.vqmmc, load); +} + +static int sdhci_msm_set_vmmc(struct sdhci_msm_host *msm_host, + struct mmc_host *mmc, bool hpm) { if (IS_ERR(mmc->supply.vmmc)) return 0; + 
msm_config_vmmc_regulator(mmc, hpm); + return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd); } @@ -1416,6 +1462,8 @@ static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host, if (msm_host->vqmmc_enabled == level) return 0; + msm_config_vqmmc_regulator(mmc, level); + if (level) { /* Set the IO voltage regulator to default voltage level */ if (msm_host->caps_0 & CORE_3_0V_SUPPORT) @@ -1638,7 +1686,8 @@ static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq) } if (pwr_state) { - ret = sdhci_msm_set_vmmc(mmc); + ret = sdhci_msm_set_vmmc(msm_host, mmc, + pwr_state & REQ_BUS_ON); if (!ret) ret = sdhci_msm_set_vqmmc(msm_host, mmc, pwr_state & REQ_BUS_ON); diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index bfb7c8b96341..bb56625cfc85 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1340,8 +1340,8 @@ static int sdhci_omap_probe(struct platform_device *pdev) /* R1B responses is required to properly manage HW busy detection. */ mmc->caps |= MMC_CAP_NEED_RSP_BUSY; - /* Allow card power off and runtime PM for eMMC/SD card devices */ - mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_AGGRESSIVE_PM; + /* Enable SDIO card power off. */ + mmc->caps |= MMC_CAP_POWER_OFF_CARD; ret = sdhci_setup_host(host); if (ret) diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index bda9e10df250..c71d9956b398 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -612,8 +613,12 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, sdhci_set_power(host, mode, vdd); - if (mode == MMC_POWER_OFF) + if (mode == MMC_POWER_OFF) { + if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD || + slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BYT_SD) + usleep_range(15000, 17500); return; + } /* * Bus power might not enable after D3 -> D0 transition due to the @@ -1239,6 +1244,29 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { .priv_size = sizeof(struct intel_host), }; +/* DMI quirks for devices with missing or broken CD GPIO info */ +static const struct gpiod_lookup_table vexia_edu_atla10_cd_gpios = { + .dev_id = "0000:00:12.0", + .table = { + GPIO_LOOKUP("INT33FC:00", 38, "cd", GPIO_ACTIVE_HIGH), + { } + }, +}; + +static const struct dmi_system_id sdhci_intel_byt_cd_gpio_override[] = { + { + /* Vexia Edu Atla 10 tablet 9V version */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + /* Above strings are too generic, also match on BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"), + }, + .driver_data = (void *)&vexia_edu_atla10_cd_gpios, + }, + { } +}; + static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { #ifdef CONFIG_PM_SLEEP .resume = byt_resume, @@ -1257,6 +1285,7 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { .add_host = byt_add_host, .remove_slot = byt_remove_slot, .ops = &sdhci_intel_byt_ops, + .cd_gpio_override = sdhci_intel_byt_cd_gpio_override, .priv_size = sizeof(struct intel_host), }; @@ -2036,6 +2065,42 @@ static const struct dev_pm_ops sdhci_pci_pm_ops = { * * \*****************************************************************************/ +static struct gpiod_lookup_table *sdhci_pci_add_gpio_lookup_table( + struct sdhci_pci_chip *chip) +{ + struct gpiod_lookup_table *cd_gpio_lookup_table; + const struct dmi_system_id *dmi_id = NULL; + size_t count; + + 
if (chip->fixes && chip->fixes->cd_gpio_override) + dmi_id = dmi_first_match(chip->fixes->cd_gpio_override); + + if (!dmi_id) + return NULL; + + cd_gpio_lookup_table = dmi_id->driver_data; + for (count = 0; cd_gpio_lookup_table->table[count].key; count++) + ; + + cd_gpio_lookup_table = kmemdup(dmi_id->driver_data, + /* count + 1 terminating entry */ + struct_size(cd_gpio_lookup_table, table, count + 1), + GFP_KERNEL); + if (!cd_gpio_lookup_table) + return ERR_PTR(-ENOMEM); + + gpiod_add_lookup_table(cd_gpio_lookup_table); + return cd_gpio_lookup_table; +} + +static void sdhci_pci_remove_gpio_lookup_table(struct gpiod_lookup_table *lookup_table) +{ + if (lookup_table) { + gpiod_remove_lookup_table(lookup_table); + kfree(lookup_table); + } +} + static struct sdhci_pci_slot *sdhci_pci_probe_slot( struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar, int slotno) @@ -2111,8 +2176,19 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( device_init_wakeup(&pdev->dev, true); if (slot->cd_idx >= 0) { + struct gpiod_lookup_table *cd_gpio_lookup_table; + + cd_gpio_lookup_table = sdhci_pci_add_gpio_lookup_table(chip); + if (IS_ERR(cd_gpio_lookup_table)) { + ret = PTR_ERR(cd_gpio_lookup_table); + goto remove; + } + ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx, slot->cd_override_level, 0); + + sdhci_pci_remove_gpio_lookup_table(cd_gpio_lookup_table); + if (ret && ret != -EPROBE_DEFER) ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 3661a224fb04..242c667d50e3 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -151,6 +151,7 @@ struct sdhci_pci_fixes { #endif const struct sdhci_ops *ops; + const struct dmi_system_id *cd_gpio_override; size_t priv_size; }; diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index a6d89a3f1946..e59bfd316305 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -401,6 +401,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (!IS_ERR(pxa->clk_core)) clk_prepare_enable(pxa->clk_core); + host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY; /* enable 1/8V DDR capable */ host->mmc->caps |= MMC_CAP_1_8V_DDR; diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 62d236bfe937..3d6af49e9180 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -1520,7 +1520,6 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_NO_HISPD_BIT | - SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_ISSUE_CMD_DAT_RESET_TOGETHER, diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 536d21028a11..6822a3249286 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2049,10 +2049,15 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; - sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + if (clk & SDHCI_CLOCK_CARD_EN) + sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN, + SDHCI_CLOCK_CONTROL); - if (clock == 0) + if (clock == 0) { + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); return; + } clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); sdhci_enable_clk(host, clk); diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 
69dcb8805e05..e7f3240a53d8 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1191,10 +1191,9 @@ static const struct sunxi_mmc_cfg sun50i_a64_emmc_cfg = { .needs_new_timings = true, }; -static const struct sunxi_mmc_cfg sun50i_a100_cfg = { +static const struct sunxi_mmc_cfg sun50i_h616_cfg = { .idma_des_size_bits = 16, .idma_des_shift = 2, - .clk_delays = NULL, .can_calibrate = true, .mask_data0 = true, .needs_new_timings = true, @@ -1217,8 +1216,9 @@ static const struct of_device_id sunxi_mmc_of_match[] = { { .compatible = "allwinner,sun20i-d1-mmc", .data = &sun20i_d1_cfg }, { .compatible = "allwinner,sun50i-a64-mmc", .data = &sun50i_a64_cfg }, { .compatible = "allwinner,sun50i-a64-emmc", .data = &sun50i_a64_emmc_cfg }, - { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun50i_a100_cfg }, + { .compatible = "allwinner,sun50i-a100-mmc", .data = &sun20i_d1_cfg }, { .compatible = "allwinner,sun50i-a100-emmc", .data = &sun50i_a100_emmc_cfg }, + { .compatible = "allwinner,sun50i-h616-mmc", .data = &sun50i_h616_cfg }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c index a6161ce340d4..4b6cbee23fe8 100644 --- a/drivers/mtd/hyperbus/hbmc-am654.c +++ b/drivers/mtd/hyperbus/hbmc-am654.c @@ -174,26 +174,30 @@ static int am654_hbmc_probe(struct platform_device *pdev) priv->hbdev.np = of_get_next_child(np, NULL); ret = of_address_to_resource(priv->hbdev.np, 0, &res); if (ret) - return ret; + goto put_node; if (of_property_read_bool(dev->of_node, "mux-controls")) { struct mux_control *control = devm_mux_control_get(dev, NULL); - if (IS_ERR(control)) - return PTR_ERR(control); + if (IS_ERR(control)) { + ret = PTR_ERR(control); + goto put_node; + } ret = mux_control_select(control, 1); if (ret) { dev_err(dev, "Failed to select HBMC mux\n"); - return ret; + goto put_node; } priv->mux_ctrl = control; } priv->hbdev.map.size = resource_size(&res); priv->hbdev.map.virt = devm_ioremap_resource(dev, &res); - if (IS_ERR(priv->hbdev.map.virt)) - return PTR_ERR(priv->hbdev.map.virt); + if (IS_ERR(priv->hbdev.map.virt)) { + ret = PTR_ERR(priv->hbdev.map.virt); + goto disable_mux; + } priv->ctlr.dev = dev; priv->ctlr.ops = &am654_hbmc_ops; @@ -226,10 +230,12 @@ release_dma: disable_mux: if (priv->mux_ctrl) mux_control_deselect(priv->mux_ctrl); +put_node: + of_node_put(priv->hbdev.np); return ret; } -static int am654_hbmc_remove(struct platform_device *pdev) +static void am654_hbmc_remove(struct platform_device *pdev) { struct am654_hbmc_priv *priv = platform_get_drvdata(pdev); struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv; @@ -241,8 +247,7 @@ static int am654_hbmc_remove(struct platform_device *pdev) if (dev_priv->rx_chan) dma_release_channel(dev_priv->rx_chan); - - return 0; + of_node_put(priv->hbdev.np); } static const struct of_device_id am654_hbmc_dt_ids[] = { @@ -256,7 +261,7 @@ MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids); static struct platform_driver am654_hbmc_platform_driver = { .probe = am654_hbmc_probe, - .remove = am654_hbmc_remove, + .remove_new = am654_hbmc_remove, .driver = { .name = "hbmc-am654", .of_match_table = am654_hbmc_dt_ids, diff --git a/drivers/mtd/hyperbus/rpc-if.c b/drivers/mtd/hyperbus/rpc-if.c index d00d30243403..e7a28f3316c3 100644 --- a/drivers/mtd/hyperbus/rpc-if.c +++ b/drivers/mtd/hyperbus/rpc-if.c @@ -56,7 +56,7 @@ static void rpcif_hb_prepare_read(struct rpcif *rpc, void *to, op.data.nbytes = len; op.data.buf.in = to; - 
rpcif_prepare(rpc, &op, NULL, NULL); + rpcif_prepare(rpc->dev, &op, NULL, NULL); } static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to, @@ -70,7 +70,7 @@ static void rpcif_hb_prepare_write(struct rpcif *rpc, unsigned long to, op.data.nbytes = len; op.data.buf.out = from; - rpcif_prepare(rpc, &op, NULL, NULL); + rpcif_prepare(rpc->dev, &op, NULL, NULL); } static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr) @@ -81,7 +81,7 @@ static u16 rpcif_hb_read16(struct hyperbus_device *hbdev, unsigned long addr) rpcif_hb_prepare_read(&hyperbus->rpc, &data, addr, 2); - rpcif_manual_xfer(&hyperbus->rpc); + rpcif_manual_xfer(hyperbus->rpc.dev); return data.x[0]; } @@ -94,7 +94,7 @@ static void rpcif_hb_write16(struct hyperbus_device *hbdev, unsigned long addr, rpcif_hb_prepare_write(&hyperbus->rpc, addr, &data, 2); - rpcif_manual_xfer(&hyperbus->rpc); + rpcif_manual_xfer(hyperbus->rpc.dev); } static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to, @@ -105,7 +105,7 @@ static void rpcif_hb_copy_from(struct hyperbus_device *hbdev, void *to, rpcif_hb_prepare_read(&hyperbus->rpc, to, from, len); - rpcif_dirmap_read(&hyperbus->rpc, from, len, to); + rpcif_dirmap_read(hyperbus->rpc.dev, from, len, to); } static const struct hyperbus_ops rpcif_hb_ops = { @@ -130,9 +130,9 @@ static int rpcif_hb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, hyperbus); - rpcif_enable_rpm(&hyperbus->rpc); + pm_runtime_enable(hyperbus->rpc.dev); - error = rpcif_hw_init(&hyperbus->rpc, true); + error = rpcif_hw_init(hyperbus->rpc.dev, true); if (error) goto out_disable_rpm; @@ -150,24 +150,29 @@ static int rpcif_hb_probe(struct platform_device *pdev) return 0; out_disable_rpm: - rpcif_disable_rpm(&hyperbus->rpc); + pm_runtime_disable(hyperbus->rpc.dev); return error; } -static int rpcif_hb_remove(struct platform_device *pdev) +static void rpcif_hb_remove(struct platform_device *pdev) { struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev); hyperbus_unregister_device(&hyperbus->hbdev); - rpcif_disable_rpm(&hyperbus->rpc); - - return 0; + pm_runtime_disable(hyperbus->rpc.dev); } +static const struct platform_device_id rpc_if_hyperflash_id_table[] = { + { .name = "rpc-if-hyperflash" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, rpc_if_hyperflash_id_table); + static struct platform_driver rpcif_platform_driver = { .probe = rpcif_hb_probe, - .remove = rpcif_hb_remove, + .remove_new = rpcif_hb_remove, + .id_table = rpc_if_hyperflash_id_table, .driver = { .name = "rpc-if-hyperflash", }, diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c index 58ca1c21ebe6..cd1eeb7a8cf1 100644 --- a/drivers/mtd/inftlcore.c +++ b/drivers/mtd/inftlcore.c @@ -482,10 +482,11 @@ static inline u16 INFTL_findwriteunit(struct INFTLrecord *inftl, unsigned block) silly = MAX_LOOPS; while (thisEUN <= inftl->lastEUN) { - inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + - blockofs, 8, &retlen, (char *)&bci); - - status = bci.Status | bci.Status1; + if (inftl_read_oob(mtd, (thisEUN * inftl->EraseSize) + + blockofs, 8, &retlen, (char *)&bci) < 0) + status = SECTOR_IGNORE; + else + status = bci.Status | bci.Status1; pr_debug("INFTL: status of block %d in EUN %d is %x\n", block , writeEUN, status); diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c index 7ac8ac901306..9cf3872e37ae 100644 --- a/drivers/mtd/mtdpstore.c +++ b/drivers/mtd/mtdpstore.c @@ -417,11 +417,14 @@ static void mtdpstore_notify_add(struct mtd_info *mtd) } longcnt = 
BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size)); - cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL); - cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL); + cxt->rmmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL); + cxt->usedmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL); longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize)); - cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL); + cxt->badmap = devm_kcalloc(&mtd->dev, longcnt, sizeof(long), GFP_KERNEL); + + if (!cxt->rmmap || !cxt->usedmap || !cxt->badmap) + return; /* just support dmesg right now */ cxt->dev.flags = PSTORE_FLAGS_DMESG; @@ -527,9 +530,6 @@ static void mtdpstore_notify_remove(struct mtd_info *mtd) mtdpstore_flush_removed(cxt); unregister_pstore_device(&cxt->dev); - kfree(cxt->badmap); - kfree(cxt->usedmap); - kfree(cxt->rmmap); cxt->mtd = NULL; cxt->index = -1; } diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c index f66385faf631..0dc2ea4fc857 100644 --- a/drivers/mtd/nand/onenand/onenand_base.c +++ b/drivers/mtd/nand/onenand/onenand_base.c @@ -2923,6 +2923,7 @@ static int do_otp_read(struct mtd_info *mtd, loff_t from, size_t len, ret = ONENAND_IS_4KB_PAGE(this) ? onenand_mlc_read_ops_nolock(mtd, from, &ops) : onenand_read_ops_nolock(mtd, from, &ops); + *retlen = ops.retlen; /* Exit OTP access mode */ this->command(mtd, ONENAND_CMD_RESET, 0, 0); diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index e6ffe87a599e..864c0524c9ea 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -1425,8 +1425,8 @@ static int anfc_parse_cs(struct arasan_nfc *nfc) * case, the "not" chosen CS is assigned to nfc->spare_cs and selected * whenever a GPIO CS must be asserted. 
*/ - if (nfc->cs_array && nfc->ncs > 2) { - if (!nfc->cs_array[0] && !nfc->cs_array[1]) { + if (nfc->cs_array) { + if (nfc->ncs > 2 && !nfc->cs_array[0] && !nfc->cs_array[1]) { dev_err(nfc->dev, "Assign a single native CS when using GPIOs\n"); return -EINVAL; @@ -1510,8 +1510,15 @@ disable_controller_clk: static int anfc_remove(struct platform_device *pdev) { + int i; struct arasan_nfc *nfc = platform_get_drvdata(pdev); + for (i = 0; i < nfc->ncs; i++) { + if (nfc->cs_array[i]) { + gpiod_put(nfc->cs_array[i]); + } + } + anfc_chips_cleanup(nfc); clk_disable_unprepare(nfc->bus_clk); diff --git a/drivers/mtd/nand/raw/atmel/pmecc.c b/drivers/mtd/nand/raw/atmel/pmecc.c index 4d7dc8a9c373..3c7dee1be21d 100644 --- a/drivers/mtd/nand/raw/atmel/pmecc.c +++ b/drivers/mtd/nand/raw/atmel/pmecc.c @@ -362,7 +362,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, size = ALIGN(size, sizeof(s32)); size += (req->ecc.strength + 1) * sizeof(s32) * 3; - user = kzalloc(size, GFP_KERNEL); + user = devm_kzalloc(pmecc->dev, size, GFP_KERNEL); if (!user) return ERR_PTR(-ENOMEM); @@ -380,10 +380,8 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, user->delta = user->dmu + req->ecc.strength + 1; gf_tables = atmel_pmecc_get_gf_tables(req); - if (IS_ERR(gf_tables)) { - kfree(user); + if (IS_ERR(gf_tables)) return ERR_CAST(gf_tables); - } user->gf_tables = gf_tables; @@ -408,12 +406,6 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, } EXPORT_SYMBOL_GPL(atmel_pmecc_create_user); -void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user) -{ - kfree(user); -} -EXPORT_SYMBOL_GPL(atmel_pmecc_destroy_user); - static int get_strength(struct atmel_pmecc_user *user) { const int *strengths = user->pmecc->caps->strengths; diff --git a/drivers/mtd/nand/raw/atmel/pmecc.h b/drivers/mtd/nand/raw/atmel/pmecc.h index 7851c05126cf..cc0c5af1f4f1 100644 --- a/drivers/mtd/nand/raw/atmel/pmecc.h +++ b/drivers/mtd/nand/raw/atmel/pmecc.h @@ -55,8 +55,6 @@ struct atmel_pmecc *devm_atmel_pmecc_get(struct device *dev); struct atmel_pmecc_user * atmel_pmecc_create_user(struct atmel_pmecc *pmecc, struct atmel_pmecc_user_req *req); -void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user); - void atmel_pmecc_reset(struct atmel_pmecc *pmecc); int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op); void atmel_pmecc_disable(struct atmel_pmecc_user *user); diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 39661e23d7d4..20c6aeef107c 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -2968,7 +2968,7 @@ static int brcmnand_resume(struct device *dev) brcmnand_save_restore_cs_config(host, 1); /* Reset the chip, required by some chips after power-up */ - nand_reset_op(chip); + nand_reset(chip, 0); } return 0; diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index 9dac3ca69d57..7d686a96d44c 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -469,6 +469,8 @@ struct cdns_nand_ctrl { struct { void __iomem *virt; dma_addr_t dma; + dma_addr_t iova_dma; + u32 size; } io; int irq; @@ -1830,11 +1832,11 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl, } if (dir == DMA_FROM_DEVICE) { - src_dma = cdns_ctrl->io.dma; + src_dma = cdns_ctrl->io.iova_dma; dst_dma = buf_dma; } else { src_dma = buf_dma; - dst_dma = cdns_ctrl->io.dma; + dst_dma = cdns_ctrl->io.iova_dma; } tx = 
dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len, @@ -1856,12 +1858,12 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl, dma_async_issue_pending(cdns_ctrl->dmac); wait_for_completion(&finished); - dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir); + dma_unmap_single(dma_dev->dev, buf_dma, len, dir); return 0; err_unmap: - dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir); + dma_unmap_single(dma_dev->dev, buf_dma, len, dir); err: dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n"); @@ -2828,6 +2830,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl) static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) { dma_cap_mask_t mask; + struct dma_device *dma_dev = cdns_ctrl->dmac->device; int ret; cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev, @@ -2863,15 +2866,24 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) dma_cap_set(DMA_MEMCPY, mask); if (cdns_ctrl->caps1->has_dma) { - cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL); - if (!cdns_ctrl->dmac) { - dev_err(cdns_ctrl->dev, - "Unable to get a DMA channel\n"); - ret = -EBUSY; + cdns_ctrl->dmac = dma_request_chan_by_mask(&mask); + if (IS_ERR(cdns_ctrl->dmac)) { + ret = dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac), + "%d: Failed to get a DMA channel\n", ret); goto disable_irq; } } + cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma, + cdns_ctrl->io.size, + DMA_BIDIRECTIONAL, 0); + + ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma); + if (ret) { + dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n"); + goto dma_release_chnl; + } + nand_controller_init(&cdns_ctrl->controller); INIT_LIST_HEAD(&cdns_ctrl->chips); @@ -2882,18 +2894,22 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl) if (ret) { dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n", ret); - goto dma_release_chnl; + goto unmap_dma_resource; } kfree(cdns_ctrl->buf); cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL); if (!cdns_ctrl->buf) { ret = -ENOMEM; - goto dma_release_chnl; + goto unmap_dma_resource; } return 0; +unmap_dma_resource: + dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma, + cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0); + dma_release_chnl: if (cdns_ctrl->dmac) dma_release_channel(cdns_ctrl->dmac); @@ -2915,6 +2931,8 @@ free_buf_desc: static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl) { cadence_nand_chips_cleanup(cdns_ctrl); + dma_unmap_resource(cdns_ctrl->dmac->device->dev, cdns_ctrl->io.iova_dma, + cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0); cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl); kfree(cdns_ctrl->buf); dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc), @@ -2983,7 +3001,9 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev) cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res); if (IS_ERR(cdns_ctrl->io.virt)) return PTR_ERR(cdns_ctrl->io.virt); + cdns_ctrl->io.dma = res->start; + cdns_ctrl->io.size = resource_size(res); dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk"); if (IS_ERR(dt->clk)) diff --git a/drivers/mtd/nand/raw/diskonchip.c b/drivers/mtd/nand/raw/diskonchip.c index 2068025d5639..594e13a852c4 100644 --- a/drivers/mtd/nand/raw/diskonchip.c +++ b/drivers/mtd/nand/raw/diskonchip.c @@ -1098,7 +1098,7 @@ static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partiti (i == 0) && (ip->firstUnit > 0)) { parts[0].name = " DiskOnChip IPL / Media Header partition"; 
parts[0].offset = 0; - parts[0].size = mtd->erasesize * ip->firstUnit; + parts[0].size = (uint64_t)mtd->erasesize * ip->firstUnit; numparts = 1; } diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c index ed0cf732d20e..36cfe03cd4ac 100644 --- a/drivers/mtd/nand/raw/r852.c +++ b/drivers/mtd/nand/raw/r852.c @@ -387,6 +387,9 @@ static int r852_wait(struct nand_chip *chip) static int r852_ready(struct nand_chip *chip) { struct r852_device *dev = r852_get_dev(nand_to_mtd(chip)); + if (dev->card_unstable) + return 0; + return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); } diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c index ae5abe492b52..adc47b87b38a 100644 --- a/drivers/mtd/ubi/attach.c +++ b/drivers/mtd/ubi/attach.c @@ -1447,7 +1447,7 @@ out_ech: return err; } -static struct ubi_attach_info *alloc_ai(void) +static struct ubi_attach_info *alloc_ai(const char *slab_name) { struct ubi_attach_info *ai; @@ -1461,7 +1461,7 @@ static struct ubi_attach_info *alloc_ai(void) INIT_LIST_HEAD(&ai->alien); INIT_LIST_HEAD(&ai->fastmap); ai->volumes = RB_ROOT; - ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache", + ai->aeb_slab_cache = kmem_cache_create(slab_name, sizeof(struct ubi_ainf_peb), 0, 0, NULL); if (!ai->aeb_slab_cache) { @@ -1491,7 +1491,7 @@ static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai) err = -ENOMEM; - scan_ai = alloc_ai(); + scan_ai = alloc_ai("ubi_aeb_slab_cache_fastmap"); if (!scan_ai) goto out; @@ -1557,7 +1557,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan) int err; struct ubi_attach_info *ai; - ai = alloc_ai(); + ai = alloc_ai("ubi_aeb_slab_cache"); if (!ai) return -ENOMEM; @@ -1575,7 +1575,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan) if (err > 0 || mtd_is_eccerr(err)) { if (err != UBI_NO_FASTMAP) { destroy_ai(ai); - ai = alloc_ai(); + ai = alloc_ai("ubi_aeb_slab_cache"); if (!ai) return -ENOMEM; @@ -1614,7 +1614,7 @@ int ubi_attach(struct ubi_device *ubi, int force_scan) if (ubi->fm && ubi_dbg_chk_fastmap(ubi)) { struct ubi_attach_info *scan_ai; - scan_ai = alloc_ai(); + scan_ai = alloc_ai("ubi_aeb_slab_cache_dbg_chk_fastmap"); if (!scan_ai) { err = -ENOMEM; goto out_wl; diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 863f571f1adb..79733163ab7d 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -282,14 +282,27 @@ out: * WL sub-system. * * @ubi: UBI device description object + * @need_fill: whether to fill wear-leveling pool when no PEBs are found */ -static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi) +static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi, + bool need_fill) { struct ubi_fm_pool *pool = &ubi->fm_wl_pool; int pnum; - if (pool->used == pool->size) + if (pool->used == pool->size) { + if (need_fill && !ubi->fm_work_scheduled) { + /* + * We cannot update the fastmap here because this + * function is called in atomic context. + * Let's fail here and refill/update it as soon as + * possible. 
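/*
 * Illustrative sketch of the deferral pattern used by the fastmap hunk above
 * (hypothetical names, needs <linux/workqueue.h>; not part of the patch):
 * work that must not run in atomic context is pushed to a workqueue, a flag
 * keeps it from being scheduled twice, and the caller simply fails this
 * attempt so it can retry once the worker has refilled the pool.
 */
struct example_pool {
	bool refill_scheduled;
	struct work_struct refill_work;		/* INIT_WORK()ed at setup */
};

static void *example_take_entry_or_defer(struct example_pool *p)
{
	if (!p->refill_scheduled) {
		p->refill_scheduled = true;
		schedule_work(&p->refill_work);	/* runs later, in process context */
	}
	return NULL;				/* nothing available right now */
}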
+ */ + ubi->fm_work_scheduled = 1; + schedule_work(&ubi->fm_work); + } return NULL; + } pnum = pool->pebs[pool->used]; return ubi->lookuptbl[pnum]; @@ -311,7 +324,7 @@ static bool need_wear_leveling(struct ubi_device *ubi) if (!ubi->used.rb_node) return false; - e = next_peb_for_wl(ubi); + e = next_peb_for_wl(ubi, false); if (!e) { if (!ubi->free.rb_node) return false; diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 6049ab9e4647..c1d3472cdc9a 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -671,7 +671,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, ubi_assert(!ubi->move_to_put); #ifdef CONFIG_MTD_UBI_FASTMAP - if (!next_peb_for_wl(ubi) || + if (!next_peb_for_wl(ubi, true) || #else if (!ubi->free.rb_node || #endif @@ -834,7 +834,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, goto out_not_moved; } if (err == MOVE_RETRY) { - scrubbing = 1; + /* + * For source PEB: + * 1. The scrubbing is set for scrub type PEB, it will + * be put back into ubi->scrub list. + * 2. Non-scrub type PEB will be put back into ubi->used + * list. + */ + keep = 1; dst_leb_clean = 1; goto out_not_moved; } diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h index 5ebe374a08ae..1d83e552533a 100644 --- a/drivers/mtd/ubi/wl.h +++ b/drivers/mtd/ubi/wl.h @@ -5,7 +5,8 @@ static void update_fastmap_work_fn(struct work_struct *wrk); static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root); static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi); -static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi); +static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi, + bool need_fill); static bool need_wear_leveling(struct ubi_device *ubi); static void ubi_fastmap_close(struct ubi_device *ubi); static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count) diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index 7b5c8bb02f11..e7db6a4e4dc9 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -250,18 +250,33 @@ static int com20020pci_probe(struct pci_dev *pdev, card->tx_led.default_trigger = devm_kasprintf(&pdev->dev, GFP_KERNEL, "arc%d-%d-tx", dev->dev_id, i); + if (!card->tx_led.default_trigger) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->tx_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pci:green:tx:%d-%d", dev->dev_id, i); - + if (!card->tx_led.name) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->tx_led.dev = &dev->dev; card->recon_led.brightness_set = led_recon_set; card->recon_led.default_trigger = devm_kasprintf(&pdev->dev, GFP_KERNEL, "arc%d-%d-recon", dev->dev_id, i); + if (!card->recon_led.default_trigger) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->recon_led.name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pci:red:recon:%d-%d", dev->dev_id, i); + if (!card->recon_led.name) { + ret = -ENOMEM; + goto err_free_arcdev; + } card->recon_led.dev = &dev->dev; ret = devm_led_classdev_register(&pdev->dev, &card->tx_led); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 51d6cf0a3fb4..3cedadef9c8a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -919,6 +919,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, if (bond->dev->flags & IFF_UP) bond_hw_addr_flush(bond->dev, old_active->dev); + + bond_slave_ns_maddrs_add(bond, old_active); } if (new_active) { @@ -935,6 +937,8 @@ static 
void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, dev_mc_sync(new_active->dev, bond->dev); netif_addr_unlock_bh(bond->dev); } + + bond_slave_ns_maddrs_del(bond, new_active); } } @@ -1450,6 +1454,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev, #define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ + NETIF_F_GSO_ENCAP_ALL | \ NETIF_F_HIGHDMA | NETIF_F_LRO) #define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ @@ -2231,6 +2236,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, bond_compute_features(bond); bond_set_carrier(bond); + /* Needs to be called before bond_select_active_slave(), which will + * remove the maddrs if the slave is selected as active slave. + */ + bond_slave_ns_maddrs_add(bond, new_slave); + if (bond_uses_primary(bond)) { block_netpoll_tx(); bond_select_active_slave(bond); @@ -2240,7 +2250,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (bond_mode_can_use_xmit_hash(bond)) bond_update_slave_arr(bond, NULL); - if (!slave_dev->netdev_ops->ndo_bpf || !slave_dev->netdev_ops->ndo_xdp_xmit) { if (bond->xdp_prog) { @@ -2422,7 +2431,7 @@ static int __bond_release_one(struct net_device *bond_dev, RCU_INIT_POINTER(bond->current_arp_slave, NULL); - if (!all && (!bond->params.fail_over_mac || + if (!all && (bond->params.fail_over_mac != BOND_FOM_ACTIVE || BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) { if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) && bond_has_slaves(bond)) @@ -2436,6 +2445,12 @@ static int __bond_release_one(struct net_device *bond_dev, if (oldcurrent == slave) bond_change_active_slave(bond, NULL); + /* Must be called after bond_change_active_slave () as the slave + * might change from an active slave to a backup slave. Then it is + * necessary to clear the maddrs on the backup slave. + */ + bond_slave_ns_maddrs_del(bond, slave); + if (bond_is_lb(bond)) { /* Must be called only after the slave has been * detached from the list and the curr_active_slave diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 06c4cd0f0002..21ca95cdef42 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -15,6 +15,7 @@ #include #include +#include static int bond_option_active_slave_set(struct bonding *bond, const struct bond_opt_value *newval); @@ -1230,6 +1231,107 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond, } #if IS_ENABLED(CONFIG_IPV6) +static bool slave_can_set_ns_maddr(const struct bonding *bond, struct slave *slave) +{ + return BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP && + !bond_is_active_slave(slave) && + slave->dev->flags & IFF_MULTICAST; +} + +/** + * slave_set_ns_maddrs - add/del all NS mac addresses for slave + * @bond: bond device + * @slave: slave device + * @add: add or remove all the NS mac addresses + * + * This function tries to add or delete all the NS mac addresses on the slave + * + * Note, the IPv6 NS target address is the unicast address in Neighbor + * Solicitation (NS) message. The dest address of NS message should be + * solicited-node multicast address of the target. The dest mac of NS message + * is converted from the solicited-node multicast address. 
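/*
 * Illustrative sketch of the address mapping described in the comment above
 * (hypothetical helper, needs <linux/in6.h> and <linux/types.h>; not part of
 * the patch): the solicited-node multicast group of an NS target is
 * ff02::1:ffXX:XXXX, built from the target's low 24 bits, and on Ethernet the
 * matching multicast MAC is 33:33 followed by the low 32 bits of that group
 * address. The kernel derives the same result via addrconf_addr_solict_mult()
 * plus ndisc_mc_map(), as the code below does.
 */
static void example_ns_target_to_mcast_mac(const struct in6_addr *target, u8 mac[6])
{
	mac[0] = 0x33;			/* IPv6 multicast OUI prefix 33:33 */
	mac[1] = 0x33;
	mac[2] = 0xff;			/* low 32 bits of ff02::1:ffXX:XXXX */
	mac[3] = target->s6_addr[13];
	mac[4] = target->s6_addr[14];
	mac[5] = target->s6_addr[15];
}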
+ * + * This function is called when + * * arp_validate changes + * * enslaving, releasing new slaves + */ +static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add) +{ + struct in6_addr *targets = bond->params.ns_targets; + char slot_maddr[MAX_ADDR_LEN]; + struct in6_addr mcaddr; + int i; + + if (!slave_can_set_ns_maddr(bond, slave)) + return; + + for (i = 0; i < BOND_MAX_NS_TARGETS; i++) { + if (ipv6_addr_any(&targets[i])) + break; + + addrconf_addr_solict_mult(&targets[i], &mcaddr); + if (!ndisc_mc_map(&mcaddr, slot_maddr, slave->dev, 0)) { + if (add) + dev_mc_add(slave->dev, slot_maddr); + else + dev_mc_del(slave->dev, slot_maddr); + } + } +} + +void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave) +{ + if (!bond->params.arp_validate) + return; + slave_set_ns_maddrs(bond, slave, true); +} + +void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave) +{ + if (!bond->params.arp_validate) + return; + slave_set_ns_maddrs(bond, slave, false); +} + +/** + * slave_set_ns_maddr - set new NS mac address for slave + * @bond: bond device + * @slave: slave device + * @target: the new IPv6 target + * @slot: the old IPv6 target in the slot + * + * This function tries to replace the old mac address to new one on the slave. + * + * Note, the target/slot IPv6 address is the unicast address in Neighbor + * Solicitation (NS) message. The dest address of NS message should be + * solicited-node multicast address of the target. The dest mac of NS message + * is converted from the solicited-node multicast address. + * + * This function is called when + * * An IPv6 NS target is added or removed. + */ +static void slave_set_ns_maddr(struct bonding *bond, struct slave *slave, + struct in6_addr *target, struct in6_addr *slot) +{ + char mac_addr[MAX_ADDR_LEN]; + struct in6_addr mcast_addr; + + if (!bond->params.arp_validate || !slave_can_set_ns_maddr(bond, slave)) + return; + + /* remove the previous mac addr from slave */ + addrconf_addr_solict_mult(slot, &mcast_addr); + if (!ipv6_addr_any(slot) && + !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0)) + dev_mc_del(slave->dev, mac_addr); + + /* add new mac addr on slave if target is set */ + addrconf_addr_solict_mult(target, &mcast_addr); + if (!ipv6_addr_any(target) && + !ndisc_mc_map(&mcast_addr, mac_addr, slave->dev, 0)) + dev_mc_add(slave->dev, mac_addr); +} + static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot, struct in6_addr *target, unsigned long last_rx) @@ -1239,8 +1341,10 @@ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot, struct slave *slave; if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) { - bond_for_each_slave(bond, slave, iter) + bond_for_each_slave(bond, slave, iter) { slave->target_last_arp_rx[slot] = last_rx; + slave_set_ns_maddr(bond, slave, target, &targets[slot]); + } targets[slot] = *target; } } @@ -1292,15 +1396,30 @@ static int bond_option_ns_ip6_targets_set(struct bonding *bond, { return -EPERM; } + +static void slave_set_ns_maddrs(struct bonding *bond, struct slave *slave, bool add) {} + +void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave) {} + +void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave) {} #endif static int bond_option_arp_validate_set(struct bonding *bond, const struct bond_opt_value *newval) { + bool changed = !!bond->params.arp_validate != !!newval->value; + struct list_head *iter; + struct slave *slave; + netdev_dbg(bond->dev, "Setting arp_validate to %s (%llu)\n", 
newval->string, newval->value); bond->params.arp_validate = newval->value; + if (changed) { + bond_for_each_slave(bond, slave, iter) + slave_set_ns_maddrs(bond, slave, !!bond->params.arp_validate); + } + return 0; } diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c index 0b0f234b0b50..a8b9ada7526c 100644 --- a/drivers/net/caif/caif_virtio.c +++ b/drivers/net/caif/caif_virtio.c @@ -745,7 +745,7 @@ err: if (cfv->vr_rx) vdev->vringh_config->del_vrhs(cfv->vdev); - if (cfv->vdev) + if (cfv->vq_tx) vdev->config->del_vqs(cfv->vdev); free_netdev(netdev); return err; diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c index 511615dc3341..cc371d0c9f3c 100644 --- a/drivers/net/can/c_can/c_can_main.c +++ b/drivers/net/can/c_can/c_can_main.c @@ -1014,49 +1014,57 @@ static int c_can_handle_bus_err(struct net_device *dev, /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) - return 0; /* check for 'last error code' which tells us the * type of the last error to occur on the CAN bus */ - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + if (likely(skb)) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (lec_type) { case LEC_STUFF_ERROR: netdev_dbg(dev, "stuff error\n"); - cf->data[2] |= CAN_ERR_PROT_STUFF; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_STUFF; stats->rx_errors++; break; case LEC_FORM_ERROR: netdev_dbg(dev, "form error\n"); - cf->data[2] |= CAN_ERR_PROT_FORM; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_FORM; stats->rx_errors++; break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_ACK; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; stats->tx_errors++; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT1; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT1; stats->tx_errors++; break; case LEC_BIT0_ERROR: netdev_dbg(dev, "bit0 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT0; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT0; stats->tx_errors++; break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; stats->rx_errors++; break; default: break; } + if (unlikely(!skb)) + return 0; + netif_receive_skb(skb); return 1; } diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 86e95e9d6533..c29862b3bb1f 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -334,7 +334,7 @@ static int c_can_plat_probe(struct platform_device *pdev) /* Check if we need custom RAMINIT via syscon. Mostly for TI * platforms. Only supported with DT boot. 
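/*
 * Background for the c_can raminit hunk below, with a hedged example
 * (hypothetical property names, needs <linux/of.h>; not from the patch):
 * of_property_read_bool() is meant for empty boolean properties, while
 * "syscon-raminit" carries a phandle plus arguments, so a plain existence
 * check with of_property_present() states the intent more precisely even
 * though both return true when the property exists.
 */
static bool example_check_properties(const struct device_node *np)
{
	/* property that has a value (phandle + cells): test for presence */
	if (of_property_present(np, "example,syscon"))
		return true;

	/* genuine boolean property: present means true, absent means false */
	return of_property_read_bool(np, "example,enable-quirk");
}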
*/ - if (np && of_property_read_bool(np, "syscon-raminit")) { + if (np && of_property_present(np, "syscon-raminit")) { u32 id; struct c_can_raminit *raminit = &priv->raminit_sys; @@ -395,15 +395,16 @@ static int c_can_plat_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", KBUILD_MODNAME, ret); - goto exit_free_device; + goto exit_pm_runtime; } dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", KBUILD_MODNAME, priv->base, dev->irq); return 0; -exit_free_device: +exit_pm_runtime: pm_runtime_disable(priv->device); +exit_free_device: free_c_can_dev(dev); exit: dev_err(&pdev->dev, "probe failed\n"); diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c index 64c349fd4600..f65c1a1e05cc 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_base.c +++ b/drivers/net/can/ctucanfd/ctucanfd_base.c @@ -867,10 +867,12 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr) } break; case CAN_STATE_ERROR_ACTIVE: - cf->can_id |= CAN_ERR_CNT; - cf->data[1] = CAN_ERR_CRTL_ACTIVE; - cf->data[6] = bec.txerr; - cf->data[7] = bec.rxerr; + if (skb) { + cf->can_id |= CAN_ERR_CNT; + cf->data[1] = CAN_ERR_CRTL_ACTIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + } break; default: netdev_warn(ndev, "unhandled error state (%d:%s)!\n", diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index f685479eda1b..43125ce96f1a 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -452,7 +452,7 @@ static int can_set_termination(struct net_device *ndev, u16 term) else set = 0; - gpiod_set_value(priv->termination_gpio, set); + gpiod_set_value_cansleep(priv->termination_gpio, set); return 0; } diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index 9bdadd716f4e..c25ecf608335 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -2230,14 +2230,19 @@ static int __maybe_unused flexcan_suspend(struct device *device) flexcan_chip_interrupts_disable(dev); + err = flexcan_transceiver_disable(priv); + if (err) + return err; + err = pinctrl_pm_select_sleep_state(device); if (err) return err; } netif_stop_queue(dev); netif_device_detach(dev); + + priv->can.state = CAN_STATE_SLEEPING; } - priv->can.state = CAN_STATE_SLEEPING; return 0; } @@ -2248,7 +2253,6 @@ static int __maybe_unused flexcan_resume(struct device *device) struct flexcan_priv *priv = netdev_priv(dev); int err; - priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(dev)) { netif_device_attach(dev); netif_start_queue(dev); @@ -2262,12 +2266,20 @@ static int __maybe_unused flexcan_resume(struct device *device) if (err) return err; - err = flexcan_chip_start(dev); + err = flexcan_transceiver_enable(priv); if (err) return err; + err = flexcan_chip_start(dev); + if (err) { + flexcan_transceiver_disable(priv); + return err; + } + flexcan_chip_interrupts_enable(dev); } + + priv->can.state = CAN_STATE_ERROR_ACTIVE; } return 0; diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 07eaf724a572..de26f5788ff5 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -391,36 +391,55 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev) return 0; priv->can.can_stats.bus_error++; - stats->rx_errors++; /* Propagate the error condition to the CAN stack. 
*/ skb = alloc_can_err_skb(ndev, &cf); - if (unlikely(!skb)) - return 0; /* Read the error counter register and check for new errors. */ - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + if (likely(skb)) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) - cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_OVERLOAD; + } - if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) - cf->data[3] = CAN_ERR_PROT_LOC_ACK; + if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST) { + stats->tx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; + } - if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_BIT0; + if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST) { + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT0; + } - if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_BIT1; + if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST) { + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT1; + } - if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_STUFF; + if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_STUFF; + } - if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + } - if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) - cf->data[2] |= CAN_ERR_PROT_FORM; + if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST) { + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_FORM; + } /* Reset the error counter, ack the IRQ and re-enable the counter. 
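/*
 * The c_can and ifi_canfd hunks above share a pattern that repeats in the
 * m_can, sja1000, hi311x, ems_usb and sun4i changes further below: bus-error
 * statistics are updated even when alloc_can_err_skb() fails, and the error
 * frame is only populated and delivered when the allocation succeeded.
 * A condensed sketch with hypothetical names (needs <linux/can/dev.h>), not
 * lifted from any one driver:
 */
static int example_handle_bus_error(struct net_device *dev, bool tx_side)
{
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_err_skb(dev, &cf);	/* may be NULL, keep going */

	if (tx_side)
		stats->tx_errors++;		/* counted regardless of skb */
	else
		stats->rx_errors++;

	if (unlikely(!skb))
		return 0;			/* stats updated, nothing to deliver */

	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	netif_receive_skb(skb);
	return 1;
}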
*/ writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR); @@ -428,6 +447,9 @@ static int ifi_canfd_handle_lec_err(struct net_device *ndev) priv->base + IFI_CANFD_INTERRUPT); writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR); + if (unlikely(!skb)) + return 0; + netif_receive_skb(skb); return 1; diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index e77b4b60f4e6..56b0f49c8116 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -607,47 +607,60 @@ static int m_can_handle_lec_err(struct net_device *dev, u32 timestamp = 0; cdev->can.can_stats.bus_error++; - stats->rx_errors++; /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); - if (unlikely(!skb)) - return 0; /* check for 'last error code' which tells us the * type of the last error to occur on the CAN bus */ - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + if (likely(skb)) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (lec_type) { case LEC_STUFF_ERROR: netdev_dbg(dev, "stuff error\n"); - cf->data[2] |= CAN_ERR_PROT_STUFF; + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_STUFF; break; case LEC_FORM_ERROR: netdev_dbg(dev, "form error\n"); - cf->data[2] |= CAN_ERR_PROT_FORM; + stats->rx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_FORM; break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_ACK; + stats->tx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_ACK; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT1; + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT1; break; case LEC_BIT0_ERROR: netdev_dbg(dev, "bit0 error\n"); - cf->data[2] |= CAN_ERR_PROT_BIT0; + stats->tx_errors++; + if (likely(skb)) + cf->data[2] |= CAN_ERR_PROT_BIT0; break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); - cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; + stats->rx_errors++; + if (likely(skb)) + cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; default: break; } + if (unlikely(!skb)) + return 0; + if (cdev->is_peripheral) timestamp = m_can_get_timestamp(cdev); @@ -2034,9 +2047,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register); void m_can_class_unregister(struct m_can_classdev *cdev) { + unregister_candev(cdev->net); if (cdev->is_peripheral) can_rx_offload_del(&cdev->offload); - unregister_candev(cdev->net); } EXPORT_SYMBOL_GPL(m_can_class_unregister); diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index e68291697c33..a1f68b74229e 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -772,22 +772,14 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv) } static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, - u32 ch) + u32 ch, u32 rule_entry) { - u32 cfg; - int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES; + int offset, page, num_rules = RCANFD_CHANNEL_NUMRULES; + u32 rule_entry_index = rule_entry % 16; u32 ridx = ch + RCANFD_RFFIFO_IDX; - if (ch == 0) { - start = 0; /* Channel 0 always starts from 0th rule */ - } else { - /* Get number of Channel 0 rules and adjust */ - cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch)); - start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg); - } - /* Enable write access to entry */ - page = RCANFD_GAFL_PAGENUM(start); + page = RCANFD_GAFL_PAGENUM(rule_entry); rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR, 
(RCANFD_GAFLECTR_AFLPN(gpriv, page) | RCANFD_GAFLECTR_AFLDAE)); @@ -803,13 +795,13 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, offset = RCANFD_C_GAFL_OFFSET; /* Accept all IDs */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, rule_entry_index), 0); /* IDE or RTR is not considered for matching */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, rule_entry_index), 0); /* Any data length accepted */ - rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0); + rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, rule_entry_index), 0); /* Place the msg in corresponding Rx FIFO entry */ - rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start), + rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, rule_entry_index), RCANFD_GAFLP1_GAFLFDP(ridx)); /* Disable write access to page */ @@ -1825,6 +1817,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) unsigned long channels_mask = 0; int err, ch_irq, g_irq; int g_err_irq, g_recc_irq; + u32 rule_entry = 0; bool fdmode = true; /* CAN FD only mode - default */ enum rcanfd_chip_id chip_id; int max_channels; @@ -2003,7 +1996,8 @@ static int rcar_canfd_probe(struct platform_device *pdev) rcar_canfd_configure_tx(gpriv, ch); /* Configure receive rules */ - rcar_canfd_configure_afl_rules(gpriv, ch); + rcar_canfd_configure_afl_rules(gpriv, ch, rule_entry); + rule_entry += RCANFD_CHANNEL_NUMRULES; } /* Configure common interrupts */ diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index aac5956e4a53..26ec3c4e4110 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -399,8 +399,6 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) uint8_t ecc, alc; skb = alloc_can_err_skb(dev, &cf); - if (skb == NULL) - return -ENOMEM; txerr = priv->read_reg(priv, SJA1000_TXERR); rxerr = priv->read_reg(priv, SJA1000_RXERR); @@ -408,8 +406,11 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) if (isrc & IRQ_DOI) { /* data overrun interrupt */ netdev_dbg(dev, "data overrun interrupt\n"); - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } + stats->rx_over_errors++; stats->rx_errors++; sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */ @@ -426,7 +427,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) else state = CAN_STATE_ERROR_ACTIVE; } - if (state != CAN_STATE_BUS_OFF) { + if (state != CAN_STATE_BUS_OFF && skb) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; @@ -434,33 +435,38 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) if (isrc & IRQ_BEI) { /* bus error interrupt */ priv->can.can_stats.bus_error++; - stats->rx_errors++; ecc = priv->read_reg(priv, SJA1000_ECC); + if (skb) { + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + /* set error type */ + switch (ecc & ECC_MASK) { + case ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + break; + } - /* set error type */ - switch (ecc & ECC_MASK) { - case ECC_BIT: - cf->data[2] |= CAN_ERR_PROT_BIT; - break; - case ECC_FORM: - 
cf->data[2] |= CAN_ERR_PROT_FORM; - break; - case ECC_STUFF: - cf->data[2] |= CAN_ERR_PROT_STUFF; - break; - default: - break; + /* set error location */ + cf->data[3] = ecc & ECC_SEG; } - /* set error location */ - cf->data[3] = ecc & ECC_SEG; - /* Error occurred during transmission? */ - if ((ecc & ECC_DIR) == 0) - cf->data[2] |= CAN_ERR_PROT_TX; + if ((ecc & ECC_DIR) == 0) { + stats->tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_TX; + } else { + stats->rx_errors++; + } } if (isrc & IRQ_EPI) { /* error passive interrupt */ @@ -476,8 +482,10 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) netdev_dbg(dev, "arbitration lost interrupt\n"); alc = priv->read_reg(priv, SJA1000_ALC); priv->can.can_stats.arbitration_lost++; - cf->can_id |= CAN_ERR_LOSTARB; - cf->data[0] = alc & 0x1f; + if (skb) { + cf->can_id |= CAN_ERR_LOSTARB; + cf->data[0] = alc & 0x1f; + } } if (state != priv->can.state) { @@ -490,6 +498,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) can_bus_off(dev); } + if (!skb) + return -ENOMEM; + netif_rx(skb); return 0; diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c index f4db77007c13..982637f8ea2f 100644 --- a/drivers/net/can/slcan/slcan-core.c +++ b/drivers/net/can/slcan/slcan-core.c @@ -71,12 +71,21 @@ MODULE_AUTHOR("Dario Binacchi "); #define SLCAN_CMD_LEN 1 #define SLCAN_SFF_ID_LEN 3 #define SLCAN_EFF_ID_LEN 8 +#define SLCAN_DATA_LENGTH_LEN 1 +#define SLCAN_ERROR_LEN 1 #define SLCAN_STATE_LEN 1 #define SLCAN_STATE_BE_RXCNT_LEN 3 #define SLCAN_STATE_BE_TXCNT_LEN 3 -#define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \ - SLCAN_STATE_BE_RXCNT_LEN + \ - SLCAN_STATE_BE_TXCNT_LEN) +#define SLCAN_STATE_MSG_LEN (SLCAN_CMD_LEN + \ + SLCAN_STATE_LEN + \ + SLCAN_STATE_BE_RXCNT_LEN + \ + SLCAN_STATE_BE_TXCNT_LEN) +#define SLCAN_ERROR_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_ERROR_LEN + \ + SLCAN_DATA_LENGTH_LEN) +#define SLCAN_FRAME_MSG_LEN_MIN (SLCAN_CMD_LEN + \ + SLCAN_SFF_ID_LEN + \ + SLCAN_DATA_LENGTH_LEN) struct slcan { struct can_priv can; @@ -176,6 +185,9 @@ static void slcan_bump_frame(struct slcan *sl) u32 tmpid; char *cmd = sl->rbuff; + if (sl->rcount < SLCAN_FRAME_MSG_LEN_MIN) + return; + skb = alloc_can_skb(sl->dev, &cf); if (unlikely(!skb)) { sl->dev->stats.rx_dropped++; @@ -281,7 +293,7 @@ static void slcan_bump_state(struct slcan *sl) return; } - if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN) + if (state == sl->can.state || sl->rcount != SLCAN_STATE_MSG_LEN) return; cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1; @@ -328,6 +340,9 @@ static void slcan_bump_err(struct slcan *sl) bool rx_errors = false, tx_errors = false, rx_over_errors = false; int i, len; + if (sl->rcount < SLCAN_ERROR_MSG_LEN_MIN) + return; + /* get len from sanitized ASCII value */ len = cmd[1]; if (len >= '0' && len < '9') @@ -456,8 +471,7 @@ static void slcan_bump(struct slcan *sl) static void slcan_unesc(struct slcan *sl, unsigned char s) { if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ - if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && - sl->rcount > 4) + if (!test_and_clear_bit(SLF_ERROR, &sl->flags)) slcan_bump(sl); sl->rcount = 0; diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index e1b8533a602e..b757555ed4c4 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -671,9 +671,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) tx_state = txerr >= rxerr ? new_state : 0; rx_state = txerr <= rxerr ? 
new_state : 0; can_change_state(net, cf, tx_state, rx_state); - netif_rx(skb); if (new_state == CAN_STATE_BUS_OFF) { + netif_rx(skb); can_bus_off(net); if (priv->can.restart_ms == 0) { priv->force_quit = 1; @@ -684,6 +684,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; + netif_rx(skb); } } @@ -696,27 +697,38 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id) /* Check for protocol errors */ if (eflag & HI3110_ERR_PROTOCOL_MASK) { skb = alloc_can_err_skb(net, &cf); - if (!skb) - break; + if (skb) + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; priv->can.can_stats.bus_error++; - priv->net->stats.rx_errors++; - if (eflag & HI3110_ERR_BITERR) - cf->data[2] |= CAN_ERR_PROT_BIT; - else if (eflag & HI3110_ERR_FRMERR) - cf->data[2] |= CAN_ERR_PROT_FORM; - else if (eflag & HI3110_ERR_STUFERR) - cf->data[2] |= CAN_ERR_PROT_STUFF; - else if (eflag & HI3110_ERR_CRCERR) - cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; - else if (eflag & HI3110_ERR_ACKERR) - cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + if (eflag & HI3110_ERR_BITERR) { + priv->net->stats.tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_BIT; + } else if (eflag & HI3110_ERR_FRMERR) { + priv->net->stats.rx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_FORM; + } else if (eflag & HI3110_ERR_STUFERR) { + priv->net->stats.rx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_STUFF; + } else if (eflag & HI3110_ERR_CRCERR) { + priv->net->stats.rx_errors++; + if (skb) + cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; + } else if (eflag & HI3110_ERR_ACKERR) { + priv->net->stats.tx_errors++; + if (skb) + cf->data[3] |= CAN_ERR_PROT_LOC_ACK; + } - cf->data[6] = hi3110_read(spi, HI3110_READ_TEC); - cf->data[7] = hi3110_read(spi, HI3110_READ_REC); netdev_dbg(priv->net, "Bus Error\n"); - netif_rx(skb); + if (skb) { + cf->data[6] = hi3110_read(spi, HI3110_READ_TEC); + cf->data[7] = hi3110_read(spi, HI3110_READ_REC); + netif_rx(skb); + } } } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index 6fecfe4cd080..21ae3a89924e 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -75,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = { .brp_inc = 1, }; +/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of + * [-64,63] for TDCO, indicating a relative TDCO. + * + * Manual tests have shown, that using a relative TDCO configuration + * results in bus off, while an absolute configuration works. + * + * For TDCO use the max value (63) from the data sheet, but 0 as the + * minimum. 
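/*
 * Context for the TDC comment above and the tdc_const/TDCMOD handling that
 * follows, stated as an assumption rather than quoted from the datasheet:
 * with transmitter delay compensation the CAN FD controller samples its own
 * transmitted data bits at a secondary sample point located TDCV + TDCO
 * minimum time quanta after the start of the bit, where TDCV is the measured
 * (TDC_AUTO) or user-supplied (TDC_MANUAL) transmitter delay and TDCO is the
 * configured offset.
 */
static inline unsigned int example_ssp_position(unsigned int tdcv, unsigned int tdco)
{
	return tdcv + tdco;	/* secondary sample point, in minimum time quanta */
}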
+ */ +static const struct can_tdc_const mcp251xfd_tdc_const = { + .tdcv_min = 0, + .tdcv_max = 63, + .tdco_min = 0, + .tdco_max = 63, + .tdcf_min = 0, + .tdcf_max = 0, +}; + static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) { switch (model) { @@ -510,8 +528,7 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) { const struct can_bittiming *bt = &priv->can.bittiming; const struct can_bittiming *dbt = &priv->can.data_bittiming; - u32 val = 0; - s8 tdco; + u32 tdcmod, val = 0; int err; /* CAN Control Register @@ -575,11 +592,16 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) return err; /* Transmitter Delay Compensation */ - tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1), - -64, 63); - val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, - MCP251XFD_REG_TDC_TDCMOD_AUTO) | - FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco); + if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO) + tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO; + else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL) + tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL; + else + tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED; + + val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) | + FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) | + FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco); return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val); } @@ -2083,10 +2105,12 @@ static int mcp251xfd_probe(struct spi_device *spi) priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; priv->can.bittiming_const = &mcp251xfd_bittiming_const; priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const; + priv->can.tdc_const = &mcp251xfd_tdc_const; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | - CAN_CTRLMODE_CC_LEN8_DLC; + CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO | + CAN_CTRLMODE_TDC_MANUAL; set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); priv->ndev = ndev; priv->spi = spi; @@ -2179,8 +2203,8 @@ static void mcp251xfd_remove(struct spi_device *spi) struct mcp251xfd_priv *priv = spi_get_drvdata(spi); struct net_device *ndev = priv->ndev; - can_rx_offload_del(&priv->offload); mcp251xfd_unregister(priv); + can_rx_offload_del(&priv->offload); spi->max_speed_hz = priv->spi_max_speed_hz_orig; free_candev(ndev); } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c index 493daaa0c7a3..54b6002f4942 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c @@ -21,6 +21,11 @@ static inline bool mcp251xfd_tx_fifo_sta_empty(u32 fifo_sta) return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF; } +static inline bool mcp251xfd_tx_fifo_sta_less_than_half_full(u32 fifo_sta) +{ + return fifo_sta & MCP251XFD_REG_FIFOSTA_TFHRFHIF; +} + static inline int mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv, u8 *tef_tail) @@ -147,7 +152,29 @@ mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p) BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len)); len = (chip_tx_tail << shift) - (tail << shift); - *len_p = len >> shift; + len >>= shift; + + /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI + * bits of a FIFOSTA register, here the TX-FIFO tail index + * might be corrupted. + * + * However here it seems the bit indicating that the TX-FIFO + * is empty (MCP251XFD_REG_FIFOSTA_TFERFFIF) is not correct + * while the TX-FIFO tail index is. 
+ * + * We assume the TX-FIFO is empty, i.e. all pending CAN frames + * haven been send, if: + * - Chip's head and tail index are equal (len == 0). + * - The TX-FIFO is less than half full. + * (The TX-FIFO empty case has already been checked at the + * beginning of this function.) + * - No free buffers in the TX ring. + */ + if (len == 0 && mcp251xfd_tx_fifo_sta_less_than_half_full(fifo_sta) && + mcp251xfd_get_tx_free(tx_ring) == 0) + len = tx_ring->obj_num; + + *len_p = len; return 0; } diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index c3a6b028ea4d..4bec1e7e7b3e 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -580,11 +580,9 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) /* bus error interrupt */ netdev_dbg(dev, "bus error interrupt\n"); priv->can.can_stats.bus_error++; - stats->rx_errors++; + ecc = readl(priv->base + SUN4I_REG_STA_ADDR); if (likely(skb)) { - ecc = readl(priv->base + SUN4I_REG_STA_ADDR); - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (ecc & SUN4I_STA_MASK_ERR) { @@ -602,9 +600,15 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) >> 16; break; } - /* error occurred during transmission? */ - if ((ecc & SUN4I_STA_ERR_DIR) == 0) + } + + /* error occurred during transmission? */ + if ((ecc & SUN4I_STA_ERR_DIR) == 0) { + if (likely(skb)) cf->data[2] |= CAN_ERR_PROT_TX; + stats->tx_errors++; + } else { + stats->rx_errors++; } } if (isrc & SUN4I_INT_ERR_PASSIVE) { @@ -630,10 +634,10 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) tx_state = txerr >= rxerr ? state : 0; rx_state = txerr <= rxerr ? state : 0; - if (likely(skb)) - can_change_state(dev, cf, tx_state, rx_state); - else - priv->can.state = state; + /* The skb allocation might fail, but can_change_state() + * handles cf == NULL. 
+ */ + can_change_state(dev, cf, tx_state, rx_state); if (state == CAN_STATE_BUS_OFF) can_bus_off(dev); } diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 050c0b49938a..5355bac4dccb 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -335,15 +335,14 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) struct net_device_stats *stats = &dev->netdev->stats; skb = alloc_can_err_skb(dev->netdev, &cf); - if (skb == NULL) - return; if (msg->type == CPC_MSG_TYPE_CAN_STATE) { u8 state = msg->msg.can_state; if (state & SJA1000_SR_BS) { dev->can.state = CAN_STATE_BUS_OFF; - cf->can_id |= CAN_ERR_BUSOFF; + if (skb) + cf->can_id |= CAN_ERR_BUSOFF; dev->can.can_stats.bus_off++; can_bus_off(dev->netdev); @@ -361,44 +360,53 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) /* bus error interrupt */ dev->can.can_stats.bus_error++; - stats->rx_errors++; - cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; + if (skb) { + cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; - switch (ecc & SJA1000_ECC_MASK) { - case SJA1000_ECC_BIT: - cf->data[2] |= CAN_ERR_PROT_BIT; - break; - case SJA1000_ECC_FORM: - cf->data[2] |= CAN_ERR_PROT_FORM; - break; - case SJA1000_ECC_STUFF: - cf->data[2] |= CAN_ERR_PROT_STUFF; - break; - default: - cf->data[3] = ecc & SJA1000_ECC_SEG; - break; + switch (ecc & SJA1000_ECC_MASK) { + case SJA1000_ECC_BIT: + cf->data[2] |= CAN_ERR_PROT_BIT; + break; + case SJA1000_ECC_FORM: + cf->data[2] |= CAN_ERR_PROT_FORM; + break; + case SJA1000_ECC_STUFF: + cf->data[2] |= CAN_ERR_PROT_STUFF; + break; + default: + cf->data[3] = ecc & SJA1000_ECC_SEG; + break; + } } /* Error occurred during transmission? */ - if ((ecc & SJA1000_ECC_DIR) == 0) - cf->data[2] |= CAN_ERR_PROT_TX; + if ((ecc & SJA1000_ECC_DIR) == 0) { + stats->tx_errors++; + if (skb) + cf->data[2] |= CAN_ERR_PROT_TX; + } else { + stats->rx_errors++; + } - if (dev->can.state == CAN_STATE_ERROR_WARNING || - dev->can.state == CAN_STATE_ERROR_PASSIVE) { + if (skb && (dev->can.state == CAN_STATE_ERROR_WARNING || + dev->can.state == CAN_STATE_ERROR_PASSIVE)) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } } else if (msg->type == CPC_MSG_TYPE_OVERRUN) { - cf->can_id |= CAN_ERR_CRTL; - cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + if (skb) { + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + } stats->rx_over_errors++; stats->rx_errors++; } - netif_rx(skb); + if (skb) + netif_rx(skb); } /* diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 264a0f764e01..ec28d504ca66 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -38,8 +38,8 @@ #define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0 #define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8 -#define GS_USB_ENDPOINT_IN 1 -#define GS_USB_ENDPOINT_OUT 2 +#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0 +#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30 /* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts * for timer overflow (will be after ~71 minutes) @@ -323,6 +323,9 @@ struct gs_usb { struct usb_device *udev; unsigned int hf_size_rx; u8 active_channels; + + unsigned int pipe_in; + unsigned int pipe_out; }; /* 'allocate' a tx context. 
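/*
 * The pipe_in/pipe_out members added to struct gs_usb above replace the
 * removed fixed GS_USB_ENDPOINT_IN/OUT numbers; the probe hunk further below
 * fills them from the interface descriptor. A condensed sketch of that
 * discovery step (hypothetical helper, needs <linux/usb.h>; not the gs_usb
 * code itself):
 */
static int example_find_bulk_pipes(struct usb_interface *intf,
				   unsigned int *pipe_in, unsigned int *pipe_out)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct usb_endpoint_descriptor *ep_in, *ep_out;
	int ret;

	ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out,
					NULL, NULL);
	if (ret)
		return ret;	/* no bulk-in/bulk-out pair on this interface */

	*pipe_in = usb_rcvbulkpipe(udev, ep_in->bEndpointAddress);
	*pipe_out = usb_sndbulkpipe(udev, ep_out->bEndpointAddress);
	return 0;
}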
@@ -522,7 +525,7 @@ static void gs_usb_set_timestamp(struct gs_can *dev, struct sk_buff *skb, static void gs_usb_receive_bulk_callback(struct urb *urb) { - struct gs_usb *usbcan = urb->context; + struct gs_usb *parent = urb->context; struct gs_can *dev; struct net_device *netdev; int rc; @@ -533,7 +536,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) struct canfd_frame *cfd; struct sk_buff *skb; - BUG_ON(!usbcan); + BUG_ON(!parent); switch (urb->status) { case 0: /* success */ @@ -550,7 +553,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) if (hf->channel >= GS_MAX_INTF) goto device_detach; - dev = usbcan->canch[hf->channel]; + dev = parent->canch[hf->channel]; netdev = dev->netdev; stats = &netdev->stats; @@ -639,20 +642,20 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) netif_rx(skb); } - resubmit_urb: - usb_fill_bulk_urb(urb, usbcan->udev, - usb_rcvbulkpipe(usbcan->udev, GS_USB_ENDPOINT_IN), +resubmit_urb: + usb_fill_bulk_urb(urb, parent->udev, + parent->pipe_in, hf, dev->parent->hf_size_rx, - gs_usb_receive_bulk_callback, usbcan); + gs_usb_receive_bulk_callback, parent); rc = usb_submit_urb(urb, GFP_ATOMIC); /* USB failure take down all interfaces */ if (rc == -ENODEV) { - device_detach: +device_detach: for (rc = 0; rc < GS_MAX_INTF; rc++) { - if (usbcan->canch[rc]) - netif_device_detach(usbcan->canch[rc]->netdev); + if (parent->canch[rc]) + netif_device_detach(parent->canch[rc]->netdev); } } } @@ -777,7 +780,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, } usb_fill_bulk_urb(urb, dev->udev, - usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT), + dev->parent->pipe_out, hf, dev->hf_size_tx, gs_usb_xmit_callback, txc); @@ -814,12 +817,12 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; - badidx: +badidx: kfree(hf); - nomem_hf: +nomem_hf: usb_free_urb(urb); - nomem_urb: +nomem_urb: gs_free_tx_context(txc); dev_kfree_skb(skb); stats->tx_dropped++; @@ -882,8 +885,7 @@ static int gs_can_open(struct net_device *netdev) /* fill, anchor, and submit rx urb */ usb_fill_bulk_urb(urb, dev->udev, - usb_rcvbulkpipe(dev->udev, - GS_USB_ENDPOINT_IN), + dev->parent->pipe_in, buf, dev->parent->hf_size_rx, gs_usb_receive_bulk_callback, parent); @@ -1311,7 +1313,7 @@ static struct gs_can *gs_make_candev(unsigned int channel, return dev; - out_free_candev: +out_free_candev: free_candev(dev->netdev); return ERR_PTR(rc); } @@ -1327,8 +1329,9 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); + struct usb_endpoint_descriptor *ep_in, *ep_out; struct gs_host_frame *hf; - struct gs_usb *dev; + struct gs_usb *parent; struct gs_host_config hconf = { .byte_order = cpu_to_le32(0x0000beef), }; @@ -1336,6 +1339,13 @@ static int gs_usb_probe(struct usb_interface *intf, unsigned int icount, i; int rc; + rc = usb_find_common_endpoints(intf->cur_altsetting, + &ep_in, &ep_out, NULL, NULL); + if (rc) { + dev_err(&intf->dev, "Required endpoints not found\n"); + return rc; + } + /* send host config */ rc = usb_control_msg_send(udev, 0, GS_USB_BREQ_HOST_FORMAT, @@ -1371,49 +1381,53 @@ static int gs_usb_probe(struct usb_interface *intf, return -EINVAL; } - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) + parent = kzalloc(sizeof(*parent), GFP_KERNEL); + if (!parent) return -ENOMEM; - init_usb_anchor(&dev->rx_submitted); + init_usb_anchor(&parent->rx_submitted); - usb_set_intfdata(intf, dev); - dev->udev = udev; + usb_set_intfdata(intf, parent); + 
parent->udev = udev; + + /* store the detected endpoints */ + parent->pipe_in = usb_rcvbulkpipe(parent->udev, ep_in->bEndpointAddress); + parent->pipe_out = usb_sndbulkpipe(parent->udev, ep_out->bEndpointAddress); for (i = 0; i < icount; i++) { unsigned int hf_size_rx = 0; - dev->canch[i] = gs_make_candev(i, intf, &dconf); - if (IS_ERR_OR_NULL(dev->canch[i])) { + parent->canch[i] = gs_make_candev(i, intf, &dconf); + if (IS_ERR_OR_NULL(parent->canch[i])) { /* save error code to return later */ - rc = PTR_ERR(dev->canch[i]); + rc = PTR_ERR(parent->canch[i]); /* on failure destroy previously created candevs */ icount = i; for (i = 0; i < icount; i++) - gs_destroy_candev(dev->canch[i]); + gs_destroy_candev(parent->canch[i]); - usb_kill_anchored_urbs(&dev->rx_submitted); - kfree(dev); + usb_kill_anchored_urbs(&parent->rx_submitted); + kfree(parent); return rc; } - dev->canch[i]->parent = dev; + parent->canch[i]->parent = parent; /* set RX packet size based on FD and if hardware - * timestamps are supported. - */ - if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) { - if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + * timestamps are supported. + */ + if (parent->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) { + if (parent->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) hf_size_rx = struct_size(hf, canfd_ts, 1); else hf_size_rx = struct_size(hf, canfd, 1); } else { - if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) + if (parent->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) hf_size_rx = struct_size(hf, classic_can_ts, 1); else hf_size_rx = struct_size(hf, classic_can, 1); } - dev->hf_size_rx = max(dev->hf_size_rx, hf_size_rx); + parent->hf_size_rx = max(parent->hf_size_rx, hf_size_rx); } return 0; @@ -1421,22 +1435,22 @@ static int gs_usb_probe(struct usb_interface *intf, static void gs_usb_disconnect(struct usb_interface *intf) { - struct gs_usb *dev = usb_get_intfdata(intf); + struct gs_usb *parent = usb_get_intfdata(intf); unsigned int i; usb_set_intfdata(intf, NULL); - if (!dev) { + if (!parent) { dev_err(&intf->dev, "Disconnect (nodata)\n"); return; } for (i = 0; i < GS_MAX_INTF; i++) - if (dev->canch[i]) - gs_destroy_candev(dev->canch[i]); + if (parent->canch[i]) + gs_destroy_candev(parent->canch[i]); - usb_kill_anchored_urbs(&dev->rx_submitted); - kfree(dev); + usb_kill_anchored_urbs(&parent->rx_submitted); + kfree(parent); } static const struct usb_device_id gs_usb_table[] = { @@ -1448,6 +1462,8 @@ static const struct usb_device_id gs_usb_table[] = { USB_CES_CANEXT_FD_PRODUCT_ID, 0) }, { USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID, USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_XYLANTA_SAINT3_VENDOR_ID, + USB_XYLANTA_SAINT3_PRODUCT_ID, 0) }, {} /* Terminating entry */ }; diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 463c6d84ae1b..1a23fcc0445c 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -373,15 +373,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); } + vc1 &= ~VC1_RX_MCST_FWD_EN; + if (enable) { vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; - vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; + vc1 |= VC1_RX_MCST_UNTAG_EN; vc4 &= ~VC4_ING_VID_CHECK_MASK; if (enable_filtering) { vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; vc5 |= VC5_DROP_VTABLE_MISS; } else { - vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; + vc4 |= 
VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; vc5 &= ~VC5_DROP_VTABLE_MISS; } @@ -393,7 +395,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable, } else { vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); - vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN); + vc1 &= ~VC1_RX_MCST_UNTAG_EN; vc4 &= ~VC4_ING_VID_CHECK_MASK; vc5 &= ~VC5_DROP_VTABLE_MISS; @@ -724,6 +726,15 @@ static void b53_enable_mib(struct b53_device *dev) b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); } +static void b53_enable_stp(struct b53_device *dev) +{ + u8 gc; + + b53_read8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, &gc); + gc |= GC_RX_BPDU_EN; + b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); +} + static u16 b53_default_pvid(struct b53_device *dev) { if (is5325(dev) || is5365(dev)) @@ -863,6 +874,7 @@ static int b53_switch_reset(struct b53_device *dev) } b53_enable_mib(dev); + b53_enable_stp(dev); return b53_flush_arl(dev, FAST_AGE_STATIC); } @@ -1465,12 +1477,21 @@ int b53_vlan_add(struct dsa_switch *ds, int port, bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; struct b53_vlan *vl; + u16 old_pvid, new_pvid; int err; err = b53_vlan_prepare(ds, port, vlan); if (err) return err; + b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &old_pvid); + if (pvid) + new_pvid = vlan->vid; + else if (!pvid && vlan->vid == old_pvid) + new_pvid = b53_default_pvid(dev); + else + new_pvid = old_pvid; + vl = &dev->vlans[vlan->vid]; b53_get_vlan_entry(dev, vlan->vid, vl); @@ -1487,10 +1508,10 @@ int b53_vlan_add(struct dsa_switch *ds, int port, b53_set_vlan_entry(dev, vlan->vid, vl); b53_fast_age_vlan(dev, vlan->vid); - if (pvid && !dsa_is_cpu_port(ds, port)) { + if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) { b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), - vlan->vid); - b53_fast_age_vlan(dev, vlan->vid); + new_pvid); + b53_fast_age_vlan(dev, old_pvid); } return 0; @@ -1902,7 +1923,7 @@ EXPORT_SYMBOL(b53_br_join); void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) { struct b53_device *dev = ds->priv; - struct b53_vlan *vl = &dev->vlans[0]; + struct b53_vlan *vl; s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; unsigned int i; u16 pvlan, reg, pvid; @@ -1928,6 +1949,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) dev->ports[port].vlan_ctl_mask = pvlan; pvid = b53_default_pvid(dev); + vl = &dev->vlans[pvid]; /* Make this port join all VLANs without VLAN entries */ if (is58xx(dev)) { @@ -1936,12 +1958,12 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) if (!(reg & BIT(cpu_port))) reg |= BIT(cpu_port); b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg); - } else { - b53_get_vlan_entry(dev, pvid, vl); - vl->members |= BIT(port) | BIT(cpu_port); - vl->untag |= BIT(port) | BIT(cpu_port); - b53_set_vlan_entry(dev, pvid, vl); } + + b53_get_vlan_entry(dev, pvid, vl); + vl->members |= BIT(port) | BIT(cpu_port); + vl->untag |= BIT(port) | BIT(cpu_port); + b53_set_vlan_entry(dev, pvid, vl); } EXPORT_SYMBOL(b53_br_leave); diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index e9fa92a83322..b854ee425fcd 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -2,7 +2,7 @@ /* * Microchip KSZ9477 switch driver main logic * - * Copyright (C) 2017-2019 Microchip Technology Inc. + * Copyright (C) 2017-2024 Microchip Technology Inc. 
*/ #include @@ -964,26 +964,51 @@ void ksz9477_get_caps(struct ksz_device *dev, int port, int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs) { u32 secs = msecs / 1000; - u8 value; - u8 data; + u8 data, mult, value; + u32 max_val; int ret; - value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs); +#define MAX_TIMER_VAL ((1 << 8) - 1) - ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value); - if (ret < 0) - return ret; + /* The aging timer comprises a 3-bit multiplier and an 8-bit second + * value. Either of them cannot be zero. The maximum timer is then + * 7 * 255 = 1785 seconds. + */ + if (!secs) + secs = 1; - data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs); + /* Return error if too large. */ + else if (secs > 7 * MAX_TIMER_VAL) + return -EINVAL; ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value); if (ret < 0) return ret; - value &= ~SW_AGE_CNT_M; - value |= FIELD_PREP(SW_AGE_CNT_M, data); + /* Check whether there is need to update the multiplier. */ + mult = FIELD_GET(SW_AGE_CNT_M, value); + max_val = MAX_TIMER_VAL; + if (mult > 0) { + /* Try to use the same multiplier already in the register as + * the hardware default uses multiplier 4 and 75 seconds for + * 300 seconds. + */ + max_val = DIV_ROUND_UP(secs, mult); + if (max_val > MAX_TIMER_VAL || max_val * mult != secs) + max_val = MAX_TIMER_VAL; + } - return ksz_write8(dev, REG_SW_LUE_CTRL_0, value); + data = DIV_ROUND_UP(secs, max_val); + if (mult != data) { + value &= ~SW_AGE_CNT_M; + value |= FIELD_PREP(SW_AGE_CNT_M, data); + ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value); + if (ret < 0) + return ret; + } + + value = DIV_ROUND_UP(secs, data); + return ksz_write8(dev, REG_SW_LUE_CTRL_3, value); } void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h index 04086e9ab0a0..ffb9484018ed 100644 --- a/drivers/net/dsa/microchip/ksz9477_reg.h +++ b/drivers/net/dsa/microchip/ksz9477_reg.h @@ -2,7 +2,7 @@ /* * Microchip KSZ9477 register definitions * - * Copyright (C) 2017-2018 Microchip Technology Inc. + * Copyright (C) 2017-2024 Microchip Technology Inc. */ #ifndef __KSZ9477_REGS_H @@ -190,8 +190,6 @@ #define SW_VLAN_ENABLE BIT(7) #define SW_DROP_INVALID_VID BIT(6) #define SW_AGE_CNT_M GENMASK(5, 3) -#define SW_AGE_CNT_S 3 -#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8) #define SW_RESV_MCAST_ENABLE BIT(2) #define SW_HASH_OPTION_M 0x03 #define SW_HASH_OPTION_CRC 1 diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index d1b2db8e6533..a3a7a90dad96 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -454,6 +454,11 @@ static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value) return regmap_bulk_write(dev->regmap[2], reg, val, 2); } +static inline int ksz_rmw8(struct ksz_device *dev, int offset, u8 mask, u8 val) +{ + return regmap_update_bits(dev->regmap[0], offset, mask, val); +} + static inline int ksz_pread8(struct ksz_device *dev, int port, int offset, u8 *data) { diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c index 7e4f307a0387..338eff0818df 100644 --- a/drivers/net/dsa/microchip/lan937x_main.c +++ b/drivers/net/dsa/microchip/lan937x_main.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Microchip LAN937X switch driver main logic - * Copyright (C) 2019-2022 Microchip Technology Inc. + * Copyright (C) 2019-2024 Microchip Technology Inc. 
*/ #include #include @@ -249,10 +249,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu) int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs) { - u32 secs = msecs / 1000; - u32 value; + u8 data, mult, value8; + bool in_msec = false; + u32 max_val, value; + u32 secs = msecs; int ret; +#define MAX_TIMER_VAL ((1 << 20) - 1) + + /* The aging timer comprises a 3-bit multiplier and a 20-bit second + * value. Either of them cannot be zero. The maximum timer is then + * 7 * 1048575 = 7340025 seconds. As this value is too large for + * practical use it can be interpreted as microseconds, making the + * maximum timer 7340 seconds with finer control. This allows for + * maximum 122 minutes compared to 29 minutes in KSZ9477 switch. + */ + if (msecs % 1000) + in_msec = true; + else + secs /= 1000; + if (!secs) + secs = 1; + + /* Return error if too large. */ + else if (secs > 7 * MAX_TIMER_VAL) + return -EINVAL; + + /* Configure how to interpret the number value. */ + ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC, + in_msec ? SW_AGE_CNT_IN_MICROSEC : 0); + if (ret < 0) + return ret; + + ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8); + if (ret < 0) + return ret; + + /* Check whether there is need to update the multiplier. */ + mult = FIELD_GET(SW_AGE_CNT_M, value8); + max_val = MAX_TIMER_VAL; + if (mult > 0) { + /* Try to use the same multiplier already in the register as + * the hardware default uses multiplier 4 and 75 seconds for + * 300 seconds. + */ + max_val = DIV_ROUND_UP(secs, mult); + if (max_val > MAX_TIMER_VAL || max_val * mult != secs) + max_val = MAX_TIMER_VAL; + } + + data = DIV_ROUND_UP(secs, max_val); + if (mult != data) { + value8 &= ~SW_AGE_CNT_M; + value8 |= FIELD_PREP(SW_AGE_CNT_M, data); + ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8); + if (ret < 0) + return ret; + } + + secs = DIV_ROUND_UP(secs, data); + value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs); ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value); diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h index 5bc16a4c4441..f36ef2ce4065 100644 --- a/drivers/net/dsa/microchip/lan937x_reg.h +++ b/drivers/net/dsa/microchip/lan937x_reg.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Microchip LAN937X switch register definitions - * Copyright (C) 2019-2021 Microchip Technology Inc. + * Copyright (C) 2019-2024 Microchip Technology Inc. 
*/ #ifndef __LAN937X_REG_H #define __LAN937X_REG_H @@ -48,8 +48,7 @@ #define SW_VLAN_ENABLE BIT(7) #define SW_DROP_INVALID_VID BIT(6) -#define SW_AGE_CNT_M 0x7 -#define SW_AGE_CNT_S 3 +#define SW_AGE_CNT_M GENMASK(5, 3) #define SW_RESV_MCAST_ENABLE BIT(2) #define REG_SW_LUE_CTRL_1 0x0311 @@ -62,6 +61,10 @@ #define SW_FAST_AGING BIT(1) #define SW_LINK_AUTO_AGING BIT(0) +#define REG_SW_LUE_CTRL_2 0x0312 + +#define SW_AGE_CNT_IN_MICROSEC BIT(7) + #define REG_SW_AGE_PERIOD__1 0x0313 #define SW_AGE_PERIOD_7_0_M GENMASK(7, 0) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d94b46316a11..29e4c94c8405 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -470,11 +470,11 @@ restore_link: return err; } -static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port) +static int mv88e6xxx_phy_is_internal(struct mv88e6xxx_chip *chip, int port) { - struct mv88e6xxx_chip *chip = ds->priv; - - return port < chip->info->num_internal_phys; + return port >= chip->info->internal_phys_offset && + port < chip->info->num_internal_phys + + chip->info->internal_phys_offset; } static int mv88e6xxx_port_ppu_updates(struct mv88e6xxx_chip *chip, int port) @@ -591,7 +591,7 @@ static void mv88e6095_phylink_get_caps(struct mv88e6xxx_chip *chip, int port, config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100; - if (mv88e6xxx_phy_is_internal(chip->ds, port)) { + if (mv88e6xxx_phy_is_internal(chip, port)) { __set_bit(PHY_INTERFACE_MODE_MII, config->supported_interfaces); } else { if (cmode < ARRAY_SIZE(mv88e6185_phy_interface_modes) && @@ -839,7 +839,7 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port, chip->info->ops->phylink_get_caps(chip, port, config); mv88e6xxx_reg_unlock(chip); - if (mv88e6xxx_phy_is_internal(ds, port)) { + if (mv88e6xxx_phy_is_internal(chip, port)) { __set_bit(PHY_INTERFACE_MODE_INTERNAL, config->supported_interfaces); /* Internal ports with no phy-mode need GMII for PHYLIB */ @@ -848,29 +848,38 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port, } } +static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port, + unsigned int mode, phy_interface_t interface) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err = 0; + + /* In inband mode, the link may come up at any time while the link + * is not forced down. Force the link down while we reconfigure the + * interface mode. + */ + if (mode == MLO_AN_INBAND && + chip->ports[port].interface != interface && + chip->info->ops->port_set_link) { + mv88e6xxx_reg_lock(chip); + err = chip->info->ops->port_set_link(chip, port, + LINK_FORCED_DOWN); + mv88e6xxx_reg_unlock(chip); + } + + return err; +} + static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state) { struct mv88e6xxx_chip *chip = ds->priv; - struct mv88e6xxx_port *p; int err = 0; - p = &chip->ports[port]; - mv88e6xxx_reg_lock(chip); - if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(ds, port)) { - /* In inband mode, the link may come up at any time while the - * link is not forced down. Force the link down while we - * reconfigure the interface mode. 
- */ - if (mode == MLO_AN_INBAND && - p->interface != state->interface && - chip->info->ops->port_set_link) - chip->info->ops->port_set_link(chip, port, - LINK_FORCED_DOWN); - + if (mode != MLO_AN_PHY || !mv88e6xxx_phy_is_internal(chip, port)) { err = mv88e6xxx_port_config_interface(chip, port, state->interface); if (err && err != -EOPNOTSUPP) @@ -887,24 +896,38 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port, err = 0; } +err_unlock: + mv88e6xxx_reg_unlock(chip); + + if (err && err != -EOPNOTSUPP) + dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port); +} + +static int mv88e6xxx_mac_finish(struct dsa_switch *ds, int port, + unsigned int mode, phy_interface_t interface) +{ + struct mv88e6xxx_chip *chip = ds->priv; + int err = 0; + /* Undo the forced down state above after completing configuration * irrespective of its state on entry, which allows the link to come * up in the in-band case where there is no separate SERDES. Also * ensure that the link can come up if the PPU is in use and we are * in PHY mode (we treat the PPU as an effective in-band mechanism.) */ + mv88e6xxx_reg_lock(chip); + if (chip->info->ops->port_set_link && - ((mode == MLO_AN_INBAND && p->interface != state->interface) || + ((mode == MLO_AN_INBAND && + chip->ports[port].interface != interface) || (mode == MLO_AN_PHY && mv88e6xxx_port_ppu_updates(chip, port)))) - chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); + err = chip->info->ops->port_set_link(chip, port, LINK_UNFORCED); - p->interface = state->interface; - -err_unlock: mv88e6xxx_reg_unlock(chip); - if (err && err != -EOPNOTSUPP) - dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port); + chip->ports[port].interface = interface; + + return err; } static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port, @@ -1724,6 +1747,8 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid, if (!chip->info->ops->vtu_getnext) return -EOPNOTSUPP; + memset(entry, 0, sizeof(*entry)); + entry->vid = vid ? vid - 1 : mv88e6xxx_max_vid(chip); entry->valid = false; @@ -1859,7 +1884,16 @@ static int mv88e6xxx_mst_put(struct mv88e6xxx_chip *chip, u8 sid) struct mv88e6xxx_mst *mst, *tmp; int err; - if (!sid) + /* If the SID is zero, it is for a VLAN mapped to the default MSTI, + * and mv88e6xxx_stu_setup() made sure it is always present, and thus, + * should not be removed here. + * + * If the chip lacks STU support, numerically the "sid" variable will + * happen to also be zero, but we don't want to rely on that fact, so + * we explicitly test that first. In that case, there is also nothing + * to do here. + */ + if (!mv88e6xxx_has_stu(chip) || !sid) return 0; list_for_each_entry_safe(mst, tmp, &chip->msts, node) { @@ -2107,13 +2141,11 @@ mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, return err; } -static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, - const unsigned char *addr, u16 vid, - u8 state) +static int mv88e6xxx_port_db_get(struct mv88e6xxx_chip *chip, + const unsigned char *addr, u16 vid, + u16 *fid, struct mv88e6xxx_atu_entry *entry) { - struct mv88e6xxx_atu_entry entry; struct mv88e6xxx_vtu_entry vlan; - u16 fid; int err; /* Ports have two private address databases: one for when the port is @@ -2124,7 +2156,7 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, * VLAN ID into the port's database used for VLAN-unaware bridging. 
*/ if (vid == 0) { - fid = MV88E6XXX_FID_BRIDGED; + *fid = MV88E6XXX_FID_BRIDGED; } else { err = mv88e6xxx_vtu_get(chip, vid, &vlan); if (err) @@ -2134,14 +2166,39 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, if (!vlan.valid) return -EOPNOTSUPP; - fid = vlan.fid; + *fid = vlan.fid; } - entry.state = 0; - ether_addr_copy(entry.mac, addr); - eth_addr_dec(entry.mac); + entry->state = 0; + ether_addr_copy(entry->mac, addr); + eth_addr_dec(entry->mac); - err = mv88e6xxx_g1_atu_getnext(chip, fid, &entry); + return mv88e6xxx_g1_atu_getnext(chip, *fid, entry); +} + +static bool mv88e6xxx_port_db_find(struct mv88e6xxx_chip *chip, + const unsigned char *addr, u16 vid) +{ + struct mv88e6xxx_atu_entry entry; + u16 fid; + int err; + + err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry); + if (err) + return false; + + return entry.state && ether_addr_equal(entry.mac, addr); +} + +static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, + const unsigned char *addr, u16 vid, + u8 state) +{ + struct mv88e6xxx_atu_entry entry; + u16 fid; + int err; + + err = mv88e6xxx_port_db_get(chip, addr, vid, &fid, &entry); if (err) return err; @@ -2739,6 +2796,13 @@ static int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port, mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_db_load_purge(chip, port, addr, vid, MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC); + if (err) + goto out; + + if (!mv88e6xxx_port_db_find(chip, addr, vid)) + err = -ENOSPC; + +out: mv88e6xxx_reg_unlock(chip); return err; @@ -3664,6 +3728,21 @@ static int mv88e6xxx_stats_setup(struct mv88e6xxx_chip *chip) return mv88e6xxx_g1_stats_clear(chip); } +static int mv88e6320_setup_errata(struct mv88e6xxx_chip *chip) +{ + u16 dummy; + int err; + + /* Workaround for erratum + * 3.3 RGMII timing may be out of spec when transmit delay is enabled + */ + err = mv88e6xxx_port_hidden_write(chip, 0, 0xf, 0x7, 0xe000); + if (err) + return err; + + return mv88e6xxx_port_hidden_read(chip, 0, 0xf, 0x7, &dummy); +} + /* Check if the errata has already been applied. 
*/ static bool mv88e6390_setup_errata_applied(struct mv88e6xxx_chip *chip) { @@ -5080,6 +5159,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { static const struct mv88e6xxx_ops mv88e6320_ops = { /* MV88E6XXX_FAMILY_6320 */ + .setup_errata = mv88e6320_setup_errata, .ieee_pri_map = mv88e6085_g1_ieee_pri_map, .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, @@ -5093,6 +5173,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -5117,8 +5198,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, - .vtu_getnext = mv88e6185_g1_vtu_getnext, - .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .stu_getnext = mv88e6352_g1_stu_getnext, + .stu_loadpurge = mv88e6352_g1_stu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -5127,6 +5210,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { static const struct mv88e6xxx_ops mv88e6321_ops = { /* MV88E6XXX_FAMILY_6320 */ + .setup_errata = mv88e6320_setup_errata, .ieee_pri_map = mv88e6085_g1_ieee_pri_map, .ip_pri_map = mv88e6085_g1_ip_pri_map, .irl_init_all = mv88e6352_g2_irl_init_all, @@ -5140,6 +5224,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay, .port_set_speed_duplex = mv88e6185_port_set_speed_duplex, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -5163,8 +5248,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait, .hardware_reset_post = mv88e6xxx_g2_eeprom_wait, .reset = mv88e6352_g1_reset, - .vtu_getnext = mv88e6185_g1_vtu_getnext, - .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, + .vtu_getnext = mv88e6352_g1_vtu_getnext, + .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, + .stu_getnext = mv88e6352_g1_stu_getnext, + .stu_loadpurge = mv88e6352_g1_stu_loadpurge, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6352_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, @@ -5732,7 +5819,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, - .atu_move_port_mask = 0x1f, + .atu_move_port_mask = 0xf, .g1_irqs = 9, .g2_irqs = 10, .pvt = true, @@ -6131,9 +6218,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_databases = 4096, .num_macs = 8192, .num_ports = 7, - .num_internal_phys = 5, + .num_internal_phys = 2, + .internal_phys_offset = 3, .num_gpio = 15, .max_vid = 4095, + .max_sid = 63, .port_base_addr = 0x10, .phy_base_addr = 0x0, .global1_addr = 0x1b, @@ -6156,9 +6245,11 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .num_databases = 4096, .num_macs = 8192, .num_ports = 7, - .num_internal_phys = 5, + .num_internal_phys = 2, + .internal_phys_offset = 3, .num_gpio 
= 15, .max_vid = 4095, + .max_sid = 63, .port_base_addr = 0x10, .phy_base_addr = 0x0, .global1_addr = 0x1b, @@ -6167,6 +6258,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .g1_irqs = 8, .g2_irqs = 10, .atu_move_port_mask = 0xf, + .pvt = true, .multi_chip = true, .edsa_support = MV88E6XXX_EDSA_SUPPORTED, .ptp_support = true, @@ -6189,7 +6281,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = { .global1_addr = 0x1b, .global2_addr = 0x1c, .age_time_coeff = 3750, - .atu_move_port_mask = 0x1f, + .atu_move_port_mask = 0xf, .g1_irqs = 9, .g2_irqs = 10, .pvt = true, @@ -6502,6 +6594,13 @@ static int mv88e6xxx_port_mdb_add(struct dsa_switch *ds, int port, mv88e6xxx_reg_lock(chip); err = mv88e6xxx_port_db_load_purge(chip, port, mdb->addr, mdb->vid, MV88E6XXX_G1_ATU_DATA_STATE_MC_STATIC); + if (err) + goto out; + + if (!mv88e6xxx_port_db_find(chip, mdb->addr, mdb->vid)) + err = -ENOSPC; + +out: mv88e6xxx_reg_unlock(chip); return err; @@ -6959,7 +7058,9 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = { .port_teardown = mv88e6xxx_port_teardown, .phylink_get_caps = mv88e6xxx_get_caps, .phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state, + .phylink_mac_prepare = mv88e6xxx_mac_prepare, .phylink_mac_config = mv88e6xxx_mac_config, + .phylink_mac_finish = mv88e6xxx_mac_finish, .phylink_mac_an_restart = mv88e6xxx_serdes_pcs_an_restart, .phylink_mac_link_down = mv88e6xxx_mac_link_down, .phylink_mac_link_up = mv88e6xxx_mac_link_up, @@ -7162,13 +7263,13 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) err = mv88e6xxx_switch_reset(chip); mv88e6xxx_reg_unlock(chip); if (err) - goto out; + goto out_phy; if (np) { chip->irq = of_irq_get(np, 0); if (chip->irq == -EPROBE_DEFER) { err = chip->irq; - goto out; + goto out_phy; } } @@ -7187,7 +7288,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev) mv88e6xxx_reg_unlock(chip); if (err) - goto out; + goto out_phy; if (chip->info->g2_irqs > 0) { err = mv88e6xxx_g2_irq_setup(chip); @@ -7227,6 +7328,8 @@ out_g1_irq: mv88e6xxx_g1_irq_free(chip); else mv88e6xxx_irq_poll_free(chip); +out_phy: + mv88e6xxx_phy_destroy(chip); out: if (pdata) dev_put(pdata->netdev); @@ -7249,7 +7352,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_ptp_free(chip); } - mv88e6xxx_phy_destroy(chip); mv88e6xxx_unregister_switch(chip); mv88e6xxx_mdios_unregister(chip); @@ -7263,6 +7365,8 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) mv88e6xxx_g1_irq_free(chip); else mv88e6xxx_irq_poll_free(chip); + + mv88e6xxx_phy_destroy(chip); } static void mv88e6xxx_shutdown(struct mdio_device *mdiodev) diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h index b34e96e689d5..4a8b56ed1bd6 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.h +++ b/drivers/net/dsa/mv88e6xxx/chip.h @@ -167,6 +167,11 @@ struct mv88e6xxx_info { /* Supports PTP */ bool ptp_support; + + /* Internal PHY start index. 
0 means that internal PHYs range starts at + * port 0, 1 means internal PHYs range starts at port 1, etc + */ + unsigned int internal_phys_offset; }; struct mv88e6xxx_atu_entry { diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c index 1266eabee086..2ab2eb2cb47b 100644 --- a/drivers/net/dsa/mv88e6xxx/devlink.c +++ b/drivers/net/dsa/mv88e6xxx/devlink.c @@ -743,7 +743,8 @@ void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds) int i; for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++) - dsa_devlink_region_destroy(chip->regions[i]); + if (chip->regions[i]) + dsa_devlink_region_destroy(chip->regions[i]); } void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port) diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c index ac302a935ce6..8480d08e6f94 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.c +++ b/drivers/net/dsa/mv88e6xxx/global2.c @@ -1184,31 +1184,22 @@ out: int mv88e6xxx_g2_irq_mdio_setup(struct mv88e6xxx_chip *chip, struct mii_bus *bus) { - int phy, irq, err, err_phy; + int phy_start = chip->info->internal_phys_offset; + int phy_end = chip->info->internal_phys_offset + + chip->info->num_internal_phys; + int phy, irq; - for (phy = 0; phy < chip->info->num_internal_phys; phy++) { + for (phy = phy_start; phy < phy_end; phy++) { irq = irq_find_mapping(chip->g2_irq.domain, phy); - if (irq < 0) { - err = irq; - goto out; - } + if (irq < 0) + return irq; + bus->irq[chip->info->phy_base_addr + phy] = irq; } return 0; -out: - err_phy = phy; - - for (phy = 0; phy < err_phy; phy++) - irq_dispose_mapping(bus->irq[phy]); - - return err; } void mv88e6xxx_g2_irq_mdio_free(struct mv88e6xxx_chip *chip, struct mii_bus *bus) { - int phy; - - for (phy = 0; phy < chip->info->num_internal_phys; phy++) - irq_dispose_mapping(bus->irq[phy]); } diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c index 252b5b3a3efe..d2104bd346ea 100644 --- a/drivers/net/dsa/mv88e6xxx/phy.c +++ b/drivers/net/dsa/mv88e6xxx/phy.c @@ -197,7 +197,10 @@ static void mv88e6xxx_phy_ppu_state_init(struct mv88e6xxx_chip *chip) static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip) { + mutex_lock(&chip->ppu_mutex); del_timer_sync(&chip->ppu_timer); + cancel_work_sync(&chip->ppu_work); + mutex_unlock(&chip->ppu_mutex); } int mv88e6185_phy_ppu_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus, diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 0186482194d2..67af798686b8 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -22,7 +22,7 @@ #define VSC9959_NUM_PORTS 6 #define VSC9959_TAS_GCL_ENTRY_MAX 63 -#define VSC9959_TAS_MIN_GATE_LEN_NS 33 +#define VSC9959_TAS_MIN_GATE_LEN_NS 35 #define VSC9959_VCAP_POLICER_BASE 63 #define VSC9959_VCAP_POLICER_MAX 383 #define VSC9959_SWITCH_PCI_BAR 4 @@ -1057,11 +1057,15 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot) mdiobus_free(felix->imdio); } -/* The switch considers any frame (regardless of size) as eligible for - * transmission if the traffic class gate is open for at least 33 ns. +/* The switch considers any frame (regardless of size) as eligible + * for transmission if the traffic class gate is open for at least + * VSC9959_TAS_MIN_GATE_LEN_NS. 
+ * * Overruns are prevented by cropping an interval at the end of the gate time - * slot for which egress scheduling is blocked, but we need to still keep 33 ns - * available for one packet to be transmitted, otherwise the port tc will hang. + * slot for which egress scheduling is blocked, but we need to still keep + * VSC9959_TAS_MIN_GATE_LEN_NS available for one packet to be transmitted, + * otherwise the port tc will hang. + * * This function returns the size of a gate interval that remains available for * setting the guard band, after reserving the space for one egress frame. */ @@ -1293,7 +1297,8 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port) * per-tc static guard band lengths, so it reduces the * useful gate interval length. Therefore, be careful * to calculate a guard band (and therefore max_sdu) - * that still leaves 33 ns available in the time slot. + * that still leaves VSC9959_TAS_MIN_GATE_LEN_NS + * available in the time slot. */ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte); /* A TC gate may be completely closed, which is a @@ -1512,7 +1517,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot) struct tc_taprio_qopt_offload *taprio; struct ocelot_port *ocelot_port; struct timespec64 base_ts; - int port; + int i, port; u32 val; mutex_lock(&ocelot->tas_lock); @@ -1544,6 +1549,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot) QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M, QSYS_PARAM_CFG_REG_3); + for (i = 0; i < taprio->num_entries; i++) + vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]); + ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE, QSYS_TAS_PARAM_CFG_CTRL); diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index 641692f716f8..47e9b2c30383 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -551,7 +551,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy, * We therefore need to lock the MDIO bus onto which the switch is * connected. */ - mutex_lock(&priv->bus->mdio_lock); + mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED); /* Actually start the request: * 1. Send mdio master packet diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index f1f1368e8146..d51d982c4bc0 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -2083,6 +2083,7 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, switch (state) { case BR_STATE_DISABLED: case BR_STATE_BLOCKING: + case BR_STATE_LISTENING: /* From UM10944 description of DRPDTAG (why put this there?): * "Management traffic flows to the port regardless of the state * of the INGRESS flag". So BPDUs are still be allowed to pass. 
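[Editor's aside — illustrative sketch, not part of the patch.] The VSC9959 TAS hunks above replace the literal 33 ns with VSC9959_TAS_MIN_GATE_LEN_NS (now 35 ns): a traffic-class gate must stay open at least that long for any frame to be eligible for transmission, so the guard band carved out of each gate interval must always leave that minimum available. Assuming hypothetical helper and macro names, the arithmetic looks roughly like this:

/* Sketch only: picosecond budget left for the guard band once the
 * minimum open time needed to emit one frame is reserved.
 * U64_MAX means the gate is permanently open.
 */
#define EXAMPLE_MIN_GATE_LEN_NS  35       /* VSC9959_TAS_MIN_GATE_LEN_NS */
#define EXAMPLE_PSEC_PER_NSEC    1000ULL

static u64 example_remaining_gate_len_ps(u64 gate_len_ns)
{
	if (gate_len_ns == U64_MAX)
		return U64_MAX;

	if (gate_len_ns < EXAMPLE_MIN_GATE_LEN_NS)
		return 0;

	return (gate_len_ns - EXAMPLE_MIN_GATE_LEN_NS) * EXAMPLE_PSEC_PER_NSEC;
}

/* The per-class max SDU then follows from the line rate, e.g.
 *   max_sdu = div_u64(example_remaining_gate_len_ps(len), picos_per_byte);
 * which is why the guard band can never eat into the 35 ns minimum.
 */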
@@ -2092,11 +2093,6 @@ static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port, mac[port].egress = false; mac[port].dyn_learn = false; break; - case BR_STATE_LISTENING: - mac[port].ingress = true; - mac[port].egress = false; - mac[port].dyn_learn = false; - break; case BR_STATE_LEARNING: mac[port].ingress = true; mac[port].egress = false; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index 230726d7b74f..d41b58fad37b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -373,8 +373,13 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, } /* Set up the header page info */ - xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, - XGBE_SKB_ALLOC_SIZE); + if (pdata->netdev->features & NETIF_F_RXCSUM) { + xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, + XGBE_SKB_ALLOC_SIZE); + } else { + xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa, + pdata->rx_buf_size); + } /* Set up the buffer page info */ xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 4030d619e84f..3cf7943b590c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -320,6 +320,18 @@ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); } +static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata) +{ + unsigned int i; + + for (i = 0; i < pdata->channel_count; i++) { + if (!pdata->channel[i]->rx_ring) + break; + + XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0); + } +} + static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type, unsigned int index, unsigned int val) { @@ -3495,8 +3507,12 @@ static int xgbe_init(struct xgbe_prv_data *pdata) xgbe_config_tx_coalesce(pdata); xgbe_config_rx_buffer_size(pdata); xgbe_config_tso_mode(pdata); - xgbe_config_sph_mode(pdata); - xgbe_config_rss(pdata); + + if (pdata->netdev->features & NETIF_F_RXCSUM) { + xgbe_config_sph_mode(pdata); + xgbe_config_rss(pdata); + } + desc_if->wrapper_tx_desc_init(pdata); desc_if->wrapper_rx_desc_init(pdata); xgbe_enable_dma_interrupts(pdata); @@ -3650,5 +3666,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->disable_vxlan = xgbe_disable_vxlan; hw_if->set_vxlan_id = xgbe_set_vxlan_id; + /* For Split Header*/ + hw_if->enable_sph = xgbe_config_sph_mode; + hw_if->disable_sph = xgbe_disable_sph_mode; + DBGPR("<--xgbe_init_function_ptrs\n"); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 6b73648b3779..34d45cebefb5 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2257,10 +2257,17 @@ static int xgbe_set_features(struct net_device *netdev, if (ret) return ret; - if ((features & NETIF_F_RXCSUM) && !rxcsum) + if ((features & NETIF_F_RXCSUM) && !rxcsum) { + hw_if->enable_sph(pdata); + hw_if->enable_vxlan(pdata); hw_if->enable_rx_csum(pdata); - else if (!(features & NETIF_F_RXCSUM) && rxcsum) + schedule_work(&pdata->restart_work); + } else if (!(features & NETIF_F_RXCSUM) && rxcsum) { + hw_if->disable_sph(pdata); + hw_if->disable_vxlan(pdata); hw_if->disable_rx_csum(pdata); + schedule_work(&pdata->restart_work); + } if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan) hw_if->enable_rx_vlan_stripping(pdata); diff --git 
a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index c731a04731f8..ff3751a0345c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -856,7 +856,6 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata) static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int phy_id = phy_data->phydev->phy_id; @@ -878,14 +877,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) phy_write(phy_data->phydev, 0x04, 0x0d01); phy_write(phy_data->phydev, 0x00, 0x9140); - linkmode_set_bit_array(phy_10_100_features_array, - ARRAY_SIZE(phy_10_100_features_array), - supported); - linkmode_set_bit_array(phy_gbit_features_array, - ARRAY_SIZE(phy_gbit_features_array), - supported); - - linkmode_copy(phy_data->phydev->supported, supported); + linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES); phy_support_asym_pause(phy_data->phydev); @@ -897,7 +889,6 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) { - __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; unsigned int phy_id = phy_data->phydev->phy_id; @@ -961,13 +952,7 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) reg = phy_read(phy_data->phydev, 0x00); phy_write(phy_data->phydev, 0x00, reg & ~0x00800); - linkmode_set_bit_array(phy_10_100_features_array, - ARRAY_SIZE(phy_10_100_features_array), - supported); - linkmode_set_bit_array(phy_gbit_features_array, - ARRAY_SIZE(phy_gbit_features_array), - supported); - linkmode_copy(phy_data->phydev->supported, supported); + linkmode_copy(phy_data->phydev->supported, PHY_GBIT_FEATURES); phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 7a41367c437d..b17c7d1dc4b0 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -859,6 +859,10 @@ struct xgbe_hw_if { void (*enable_vxlan)(struct xgbe_prv_data *); void (*disable_vxlan)(struct xgbe_prv_data *); void (*set_vxlan_id)(struct xgbe_prv_data *); + + /* For Split Header */ + void (*enable_sph)(struct xgbe_prv_data *pdata); + void (*disable_sph)(struct xgbe_prv_data *pdata); }; /* This structure represents implementation specific routines for an diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 379d19d18dbe..5808e3c73a8f 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -9,8 +9,6 @@ #include "main.h" -static const struct acpi_device_id xge_acpi_match[]; - static int xge_get_resources(struct xge_pdata *pdata) { struct platform_device *pdev; @@ -733,7 +731,7 @@ MODULE_DEVICE_TABLE(acpi, xge_acpi_match); static struct platform_driver xge_driver = { .driver = { .name = "xgene-enet-v2", - .acpi_match_table = ACPI_PTR(xge_acpi_match), + .acpi_match_table = xge_acpi_match, }, .probe = xge_probe, .remove = xge_remove, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 06508eebb585..a467c8f91020 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ 
b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -1436,7 +1436,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down) aq_ptp_ring_free(self); aq_ptp_free(self); - if (likely(self->aq_fw_ops->deinit) && link_down) { + /* May be invoked during hot unplug. */ + if (pci_device_is_present(self->pdev) && + likely(self->aq_fw_ops->deinit) && link_down) { mutex_lock(&self->fwreq_mutex); self->aq_fw_ops->deinit(self->aq_hw); mutex_unlock(&self->fwreq_mutex); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 1693f6c60efc..e53ab9b5482e 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1956,7 +1956,11 @@ static int bcm_sysport_open(struct net_device *dev) unsigned int i; int ret; - clk_prepare_enable(priv->clk); + ret = clk_prepare_enable(priv->clk); + if (ret) { + netdev_err(dev, "could not enable priv clock\n"); + return ret; + } /* Reset UniMAC */ umac_reset(priv); @@ -2614,7 +2618,11 @@ static int bcm_sysport_probe(struct platform_device *pdev) goto err_deregister_notifier; } - clk_prepare_enable(priv->clk); + ret = clk_prepare_enable(priv->clk); + if (ret) { + dev_err(&pdev->dev, "could not enable priv clock\n"); + goto err_deregister_netdev; + } priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK; dev_info(&pdev->dev, @@ -2628,6 +2636,8 @@ static int bcm_sysport_probe(struct platform_device *pdev) return 0; +err_deregister_netdev: + unregister_netdev(dev); err_deregister_notifier: unregister_netdevice_notifier(&priv->netdev_notifier); err_deregister_fixed_link: @@ -2799,7 +2809,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d) if (!netif_running(dev)) return 0; - clk_prepare_enable(priv->clk); + ret = clk_prepare_enable(priv->clk); + if (ret) { + netdev_err(dev, "could not enable priv clock\n"); + return ret; + } + if (priv->wolopts) clk_disable_unprepare(priv->wol_clk); diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index b4381cd41979..3f4e8bac40c1 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -171,6 +171,7 @@ static int platform_phy_connect(struct bgmac *bgmac) static int bgmac_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; + struct device_node *phy_node; struct bgmac *bgmac; struct resource *regs; int ret; @@ -236,7 +237,9 @@ static int bgmac_probe(struct platform_device *pdev) bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset; bgmac->get_bus_clock = platform_bgmac_get_bus_clock; bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32; - if (of_parse_phandle(np, "phy-handle", 0)) { + phy_node = of_parse_phandle(np, "phy-handle", 0); + if (phy_node) { + of_node_put(phy_node); bgmac->phy_connect = platform_phy_connect; } else { bgmac->phy_connect = bgmac_phy_connect_direct; diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index d73ef262991d..6fee9a41839c 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -328,8 +328,7 @@ #define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */ #define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \ BGMAC_RX_FRAME_OFFSET) -/* Jumbo frame size with FCS */ -#define BGMAC_RX_MAX_FRAME_SIZE 9724 +#define BGMAC_RX_MAX_FRAME_SIZE 1536 #define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE) #define 
BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index a1783faf4fe9..afa591ec7a13 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -13671,19 +13671,20 @@ static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir, return bnx2x_func_state_change(bp, &func_params); } -static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int bnx2x_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info); int rc; int drift_dir = 1; int val, period, period1, period2, dif, dif1, dif2; int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0; + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); - DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb); + DP(BNX2X_MSG_PTP, "PTP adjfine called, ppb = %d\n", ppb); if (!netif_running(bp->dev)) { DP(BNX2X_MSG_PTP, - "PTP adjfreq called while the interface is down\n"); + "PTP adjfine called while the interface is down\n"); return -ENETDOWN; } @@ -13818,7 +13819,7 @@ void bnx2x_register_phc(struct bnx2x *bp) bp->ptp_clock_info.n_ext_ts = 0; bp->ptp_clock_info.n_per_out = 0; bp->ptp_clock_info.pps = 0; - bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq; + bp->ptp_clock_info.adjfine = bnx2x_ptp_adjfine; bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime; bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime; bp->ptp_clock_info.settime64 = bnx2x_ptp_settime; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 20e2fae64e67..6b1245a3ab4b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1994,7 +1994,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (!skb) goto oom_next_rx; } else { - skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1); + skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, + rxr->page_pool, &xdp); if (!skb) { /* we should be able to free the old skb here */ bnxt_xdp_buff_frags_free(rxr, &xdp); @@ -4040,7 +4041,7 @@ int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode) struct net_device *dev = bp->dev; if (page_mode) { - bp->flags &= ~BNXT_FLAG_AGG_RINGS; + bp->flags &= ~(BNXT_FLAG_AGG_RINGS | BNXT_FLAG_NO_AGG_RINGS); bp->flags |= BNXT_FLAG_RX_PAGE_MODE; if (bp->xdp_prog->aux->xdp_has_frags) @@ -12798,6 +12799,14 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) bnxt_close_nic(bp, true, false); dev->mtu = new_mtu; + + /* MTU change may change the AGG ring settings if an XDP multi-buffer + * program is attached. We need to set the AGG rings settings and + * rx_skb_func accordingly. 
+ */ + if (READ_ONCE(bp->xdp_prog)) + bnxt_set_rx_skb_mode(bp, true); + bnxt_set_ring_params(bp); if (netif_running(dev)) @@ -14113,8 +14122,12 @@ static void bnxt_io_resume(struct pci_dev *pdev) rtnl_lock(); err = bnxt_hwrm_func_qcaps(bp); - if (!err && netif_running(netdev)) - err = bnxt_open(netdev); + if (!err) { + if (netif_running(netdev)) + err = bnxt_open(netdev); + else + err = bnxt_reserve_rings(bp, true); + } bnxt_ulp_start(bp, err); if (!err) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c index c06789882036..32813cdd5aa5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c @@ -66,20 +66,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, } } - if (info->dest_buf) { - if ((info->seg_start + off + len) <= - BNXT_COREDUMP_BUF_LEN(info->buf_len)) { - memcpy(info->dest_buf + off, dma_buf, len); - } else { - rc = -ENOBUFS; - break; - } - } - if (cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE)) info->dest_buf_size += len; + if (info->dest_buf) { + if ((info->seg_start + off + len) <= + BNXT_COREDUMP_BUF_LEN(info->buf_len)) { + u16 copylen = min_t(u16, len, + info->dest_buf_size - off); + + memcpy(info->dest_buf + off, dma_buf, copylen); + if (copylen < len) + break; + } else { + rc = -ENOBUFS; + if (cmn_req->req_type == + cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) { + kfree(info->dest_buf); + info->dest_buf = NULL; + } + break; + } + } + if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE)) break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 799adba0034a..7daaed4520ac 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1392,6 +1392,17 @@ static int bnxt_get_regs_len(struct net_device *dev) return reg_len; } +#define BNXT_PCIE_32B_ENTRY(start, end) \ + { offsetof(struct pcie_ctx_hw_stats, start), \ + offsetof(struct pcie_ctx_hw_stats, end) } + +static const struct { + u16 start; + u16 end; +} bnxt_pcie_32b_entries[] = { + BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]), +}; + static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) { @@ -1423,12 +1434,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); rc = hwrm_req_send(bp, req); if (!rc) { - __le64 *src = (__le64 *)hw_pcie_stats; - u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); - int i; + u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN); + u8 *src = (u8 *)hw_pcie_stats; + int i, j; - for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) - dst[i] = le64_to_cpu(src[i]); + for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) { + if (i >= bnxt_pcie_32b_entries[j].start && + i <= bnxt_pcie_32b_entries[j].end) { + u32 *dst32 = (u32 *)(dst + i); + + *dst32 = le32_to_cpu(*(__le32 *)(src + i)); + i += 4; + if (i > bnxt_pcie_32b_entries[j].end && + j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1) + j++; + } else { + u64 *dst64 = (u64 *)(dst + i); + + *dst64 = le64_to_cpu(*(__le64 *)(src + i)); + i += 8; + } + } } hwrm_req_drop(bp, req); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 2e54bf4fc7a7..c0f67db641c3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -252,7 +252,7 @@ static int 
bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id, rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len); if (rc) - return rc; + goto drop_req; hwrm_req_timeout(bp, req, fw_msg->timeout); resp = hwrm_req_hold(bp, req); @@ -264,6 +264,7 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id, memcpy(fw_msg->resp, resp, resp_len); } +drop_req: hwrm_req_drop(bp, req); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index aa56db138d6b..d9a7b85343a4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -462,20 +462,13 @@ int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp) struct sk_buff * bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, - struct page_pool *pool, struct xdp_buff *xdp, - struct rx_cmp_ext *rxcmp1) + struct page_pool *pool, struct xdp_buff *xdp) { struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); if (!skb) return NULL; - skb_checksum_none_assert(skb); - if (RX_CMP_L4_CS_OK(rxcmp1)) { - if (bp->dev->features & NETIF_F_RXCSUM) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = RX_CMP_ENCAP(rxcmp1); - } - } + xdp_update_skb_shared_info(skb, num_frags, sinfo->xdp_frags_size, BNXT_RX_PAGE_SIZE * sinfo->nr_frags, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h index ea430d6961df..ae8159c5f56c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h @@ -33,6 +33,5 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr, struct xdp_buff *xdp); struct sk_buff *bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags, struct page_pool *pool, - struct xdp_buff *xdp, - struct rx_cmp_ext *rxcmp1); + struct xdp_buff *xdp); #endif diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 0c694ab3c110..95d460237835 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -55,6 +55,7 @@ #include #include #include +#include #include #include @@ -17736,6 +17737,9 @@ static int tg3_init_one(struct pci_dev *pdev, } else persist_dma_mask = dma_mask = DMA_BIT_MASK(64); + if (tg3_asic_rev(tp) == ASIC_REV_57766) + persist_dma_mask = DMA_BIT_MASK(31); + /* Configure DMA attributes. */ if (dma_mask > DMA_BIT_MASK(32)) { err = dma_set_mask(&pdev->dev, dma_mask); @@ -18111,6 +18115,50 @@ unlock: static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume); +/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal + * PCIe AER event on the tg3 device if the tg3 device is not, or cannot + * be, powered down. 
+ */ +static const struct dmi_system_id tg3_restart_aer_quirk_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"), + }, + }, + {} +}; + static void tg3_shutdown(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); @@ -18127,6 +18175,19 @@ static void tg3_shutdown(struct pci_dev *pdev) if (system_state == SYSTEM_POWER_OFF) tg3_power_down(tp); + else if (system_state == SYSTEM_RESTART && + dmi_first_match(tg3_restart_aer_quirk_table) && + pdev->current_state != PCI_D3cold && + pdev->current_state != PCI_UNKNOWN) { + /* Disable PCIe AER on the tg3 to avoid a fatal + * error during this system restart. + */ + pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_CERE | + PCI_EXP_DEVCTL_NFERE | + PCI_EXP_DEVCTL_FERE | + PCI_EXP_DEVCTL_URRE); + } rtnl_unlock(); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 1aa578c1ca4a..8d66de71ea60 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -1271,6 +1271,8 @@ struct macb { struct clk *rx_clk; struct clk *tsu_clk; struct net_device *dev; + /* Protects hw_stats and ethtool_stats */ + spinlock_t stats_lock; union { struct macb_stats macb; struct gem_stats gem; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index d44d53d69762..d2f4709dee0d 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -962,22 +962,15 @@ static void macb_update_stats(struct macb *bp) static int macb_halt_tx(struct macb *bp) { - unsigned long halt_time, timeout; - u32 status; + u32 status; macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT)); - timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT); - do { - halt_time = jiffies; - status = macb_readl(bp, TSR); - if (!(status & MACB_BIT(TGO))) - return 0; - - udelay(250); - } while (time_before(halt_time, timeout)); - - return -ETIMEDOUT; + /* Poll TSR until TGO is cleared or timeout. 
*/ + return read_poll_timeout_atomic(macb_readl, status, + !(status & MACB_BIT(TGO)), + 250, MACB_HALT_TIMEOUT, false, + bp, TSR); } static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget) @@ -1936,10 +1929,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (status & MACB_BIT(ISR_ROVR)) { /* We missed at least one packet */ + spin_lock(&bp->stats_lock); if (macb_is_gem(bp)) bp->hw_stats.gem.rx_overruns++; else bp->hw_stats.macb.rx_overruns++; + spin_unlock(&bp->stats_lock); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); @@ -2999,6 +2994,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) if (!netif_running(bp->dev)) return nstat; + spin_lock_irq(&bp->stats_lock); gem_update_stats(bp); nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + @@ -3028,6 +3024,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; nstat->tx_fifo_errors = hwstat->tx_underrun; + spin_unlock_irq(&bp->stats_lock); return nstat; } @@ -3035,12 +3032,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp) static void gem_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { - struct macb *bp; + struct macb *bp = netdev_priv(dev); - bp = netdev_priv(dev); + spin_lock_irq(&bp->stats_lock); gem_update_stats(bp); memcpy(data, &bp->ethtool_stats, sizeof(u64) * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES)); + spin_unlock_irq(&bp->stats_lock); } static int gem_get_sset_count(struct net_device *dev, int sset) @@ -3090,6 +3088,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) return gem_get_stats(bp); /* read stats from hardware */ + spin_lock_irq(&bp->stats_lock); macb_update_stats(bp); /* Convert HW stats into netdevice stats */ @@ -3123,6 +3122,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev) nstat->tx_carrier_errors = hwstat->tx_carrier_errors; nstat->tx_fifo_errors = hwstat->tx_underruns; /* Don't know about heartbeat or window errors... */ + spin_unlock_irq(&bp->stats_lock); return nstat; } @@ -4949,6 +4949,7 @@ static int macb_probe(struct platform_device *pdev) bp->usrio = macb_config->usrio; spin_lock_init(&bp->lock); + spin_lock_init(&bp->stats_lock); /* setup capabilities */ macb_configure_caps(bp, macb_config); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 98793b2ac2c7..fd7c80edb6e8 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1512,14 +1512,17 @@ static void free_netsgbuf_with_resp(void *buf) } /** - * liquidio_ptp_adjfreq - Adjust ptp frequency + * liquidio_ptp_adjfine - Adjust ptp frequency * @ptp: PTP clock info - * @ppb: how much to adjust by, in parts-per-billion + * @scaled_ppm: how much to adjust by, in scaled parts-per-million + * + * Scaled parts per million is ppm with a 16-bit binary fractional field. 
*/ -static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct lio *lio = container_of(ptp, struct lio, ptp_info); struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); u64 comp, delta; unsigned long flags; bool neg_adj = false; @@ -1643,7 +1646,7 @@ static void oct_ptp_open(struct net_device *netdev) lio->ptp_info.n_ext_ts = 0; lio->ptp_info.n_per_out = 0; lio->ptp_info.pps = 0; - lio->ptp_info.adjfreq = liquidio_ptp_adjfreq; + lio->ptp_info.adjfine = liquidio_ptp_adjfine; lio->ptp_info.adjtime = liquidio_ptp_adjtime; lio->ptp_info.gettime64 = liquidio_ptp_gettime; lio->ptp_info.settime64 = liquidio_ptp_settime; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 5657ac8cfca0..f1a8ae047821 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -2084,7 +2084,7 @@ void t4_idma_monitor(struct adapter *adapter, struct sge_idma_monitor_state *idma, int hz, int ticks); int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, - unsigned int naddr, u8 *addr); + u8 start, unsigned int naddr, u8 *addr); void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index, bool sleep_ok); void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 8477a93cee6b..b77897aa06c4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -2273,6 +2273,7 @@ int cxgb4_init_ethtool_filters(struct adapter *adap) eth_filter->port[i].bmap = bitmap_zalloc(nentries, GFP_KERNEL); if (!eth_filter->port[i].bmap) { ret = -ENOMEM; + kvfree(eth_filter->port[i].loc_array); goto free_eth_finfo; } } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 9cbce1faab26..c09240a5693b 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1800,7 +1800,10 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid, struct adapter *adap = container_of(t, struct adapter, tids); struct sk_buff *skb; - WARN_ON(tid_out_of_range(&adap->tids, tid)); + if (tid_out_of_range(&adap->tids, tid)) { + dev_err(adap->pdev_dev, "tid %d out of range\n", tid); + return; + } if (t->tid_tab[tid - adap->tids.tid_base]) { t->tid_tab[tid - adap->tids.tid_base] = NULL; @@ -3247,7 +3250,7 @@ static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac) dev_info(pi->adapter->pdev_dev, "Setting MAC %pM on VF %d\n", mac, vf); - ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac); + ret = t4_set_vf_mac_acl(adap, vf + 1, pi->lport, 1, mac); if (!ret) ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac); return ret; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c index 5bf117d2179f..cbd06d9b95d4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c @@ -194,17 +194,20 @@ int cxgb4_ptp_redirect_rx_packet(struct adapter *adapter, struct port_info *pi) } /** - * cxgb4_ptp_adjfreq - Adjust frequency of PHC cycle counter + * cxgb4_ptp_adjfine - Adjust frequency of PHC cycle counter * @ptp: ptp clock structure - * @ppb: Desired 
frequency change in parts per billion + * @scaled_ppm: Desired frequency in scaled parts per billion * - * Adjust the frequency of the PHC cycle counter by the indicated ppb from + * Adjust the frequency of the PHC cycle counter by the indicated amount from * the base frequency. + * + * Scaled parts per million is ppm with a 16-bit binary fractional field. */ -static int cxgb4_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int cxgb4_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct adapter *adapter = (struct adapter *)container_of(ptp, struct adapter, ptp_clock_info); + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); struct fw_ptp_cmd c; int err; @@ -404,7 +407,7 @@ static const struct ptp_clock_info cxgb4_ptp_clock_info = { .n_ext_ts = 0, .n_per_out = 0, .pps = 0, - .adjfreq = cxgb4_ptp_adjfreq, + .adjfine = cxgb4_ptp_adjfine, .adjtime = cxgb4_ptp_adjtime, .gettime64 = cxgb4_ptp_gettime, .settime64 = cxgb4_ptp_settime, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 76de55306c4d..175bf9b13058 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -10215,11 +10215,12 @@ out: * t4_set_vf_mac_acl - Set MAC address for the specified VF * @adapter: The adapter * @vf: one of the VFs instantiated by the specified PF + * @start: The start port id associated with specified VF * @naddr: the number of MAC addresses * @addr: the MAC address(es) to be set to the specified VF */ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, - unsigned int naddr, u8 *addr) + u8 start, unsigned int naddr, u8 *addr) { struct fw_acl_mac_cmd cmd; @@ -10234,7 +10235,7 @@ int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd)); cmd.nmac = naddr; - switch (adapter->pf) { + switch (start) { case 3: memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3)); break; diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c index 1e55b12fee51..5c9f462ac935 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c @@ -346,8 +346,9 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl, * driver. Once driver synthesizes cpl_pass_accpet_req the skb will go * through the regular cpl_pass_accept_req processing in TOM. 
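 *
 * (Editorial aside, not part of the driver comment: size_add() from
 * <linux/overflow.h>, used in the hunk just below, saturates to SIZE_MAX
 * when the addition would overflow, so an oversized gl->tot_len makes
 * alloc_skb() fail cleanly instead of returning an undersized buffer.)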
*/ - skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) - - pktshift, GFP_ATOMIC); + skb = alloc_skb(size_add(gl->tot_len, + sizeof(struct cpl_pass_accept_req)) - + pktshift, GFP_ATOMIC); if (unlikely(!skb)) return NULL; __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index b21e56de6167..c79d97f4ee90 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1778,10 +1778,11 @@ dm9000_drv_remove(struct platform_device *pdev) unregister_netdev(ndev); dm9000_release_board(pdev, dm); - free_netdev(ndev); /* free device structure */ if (dm->power_supply) regulator_disable(dm->power_supply); + free_netdev(ndev); /* free device structure */ + dev_dbg(&pdev->dev, "released and freed device\n"); return 0; } diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 2c67a857a42f..71cb7fe63de3 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev) eth_hw_addr_set(dev, psrom->mac_addr); if (np->chip_id == CHIP_IP1000A) { - np->led_mode = psrom->led_mode; + np->led_mode = le16_to_cpu(psrom->led_mode); return 0; } diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index 195dc6cfd895..0e33e2eaae96 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -335,7 +335,7 @@ typedef struct t_SROM { u16 sub_system_id; /* 0x06 */ u16 pci_base_1; /* 0x08 (IP1000A only) */ u16 pci_base_2; /* 0x0a (IP1000A only) */ - u16 led_mode; /* 0x0c (IP1000A only) */ + __le16 led_mode; /* 0x0c (IP1000A only) */ u16 reserved1[9]; /* 0x0e-0x1f */ u8 mac_addr[6]; /* 0x20-0x25 */ u8 reserved2[10]; /* 0x26-0x2f */ diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 61fe9625bed1..06f42e5b5149 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -562,7 +562,7 @@ struct be_adapter { struct be_dma_mem mbox_mem_alloced; struct be_mcc_obj mcc_obj; - struct mutex mcc_lock; /* For serializing mcc cmds to BE card */ + spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ spinlock_t mcc_cq_lock; u16 cfg_num_rx_irqs; /* configured via set-channels */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 08ec84cd21c0..d00f4e29c9d8 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -573,7 +573,7 @@ int be_process_mcc(struct be_adapter *adapter) /* Wait till no more pending mcc requests are present */ static int be_mcc_wait_compl(struct be_adapter *adapter) { -#define mcc_timeout 12000 /* 12s timeout */ +#define mcc_timeout 120000 /* 12s timeout */ int i, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; @@ -587,7 +587,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) if (atomic_read(&mcc_obj->q.used) == 0) break; - usleep_range(500, 1000); + udelay(100); } if (i == mcc_timeout) { dev_err(&adapter->pdev->dev, "FW not responding\n"); @@ -865,7 +865,7 @@ static bool use_mcc(struct be_adapter *adapter) static int be_cmd_lock(struct be_adapter *adapter) { if (use_mcc(adapter)) { - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); return 0; } else { return mutex_lock_interruptible(&adapter->mbox_lock); @@ -876,7 +876,7 @@ static int be_cmd_lock(struct 
be_adapter *adapter) static void be_cmd_unlock(struct be_adapter *adapter) { if (use_mcc(adapter)) - return mutex_unlock(&adapter->mcc_lock); + return spin_unlock_bh(&adapter->mcc_lock); else return mutex_unlock(&adapter->mbox_lock); } @@ -1046,7 +1046,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, struct be_cmd_req_mac_query *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1075,7 +1075,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1087,7 +1087,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, struct be_cmd_req_pmac_add *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1112,7 +1112,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; @@ -1130,7 +1130,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) if (pmac_id == -1) return 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1150,7 +1150,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1413,7 +1413,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, struct be_dma_mem *q_mem = &rxq->dma_mem; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1443,7 +1443,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1507,7 +1507,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) struct be_cmd_req_q_destroy *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1524,7 +1524,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) q->created = false; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1592,7 +1592,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) struct be_cmd_req_hdr *hdr; int status = 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1620,7 +1620,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) adapter->stats_cmd_sent = true; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1636,7 +1636,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, CMD_SUBSYSTEM_ETH)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1659,7 +1659,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, adapter->stats_cmd_sent = true; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1696,7 +1696,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, struct 
be_cmd_req_link_status *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); if (link_status) *link_status = LINK_DOWN; @@ -1735,7 +1735,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1746,7 +1746,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) struct be_cmd_req_get_cntl_addnl_attribs *req; int status = 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1761,7 +1761,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter) status = be_mcc_notify(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1810,7 +1810,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) if (!get_fat_cmd.va) return -ENOMEM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); while (total_size) { buf_size = min(total_size, (u32)60*1024); @@ -1848,9 +1848,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) log_offset += buf_size; } err: + spin_unlock_bh(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, get_fat_cmd.va, get_fat_cmd.dma); - mutex_unlock(&adapter->mcc_lock); return status; } @@ -1861,7 +1861,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) struct be_cmd_req_get_fw_version *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1884,7 +1884,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter) sizeof(adapter->fw_on_flash)); } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1898,7 +1898,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, struct be_cmd_req_modify_eq_delay *req; int status = 0, i; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1921,7 +1921,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter, status = be_mcc_notify(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1948,7 +1948,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, struct be_cmd_req_vlan_config *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -1970,7 +1970,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -1981,7 +1981,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) struct be_cmd_req_rx_filter *req = mem->va; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2014,7 +2014,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2045,7 +2045,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2065,7 
+2065,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED) return -EOPNOTSUPP; @@ -2084,7 +2084,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2107,7 +2107,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2188,7 +2188,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) return 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2213,7 +2213,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2225,7 +2225,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, struct be_cmd_req_enable_disable_beacon *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2246,7 +2246,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2257,7 +2257,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) struct be_cmd_req_get_beacon_state *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2281,7 +2281,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2305,7 +2305,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, return -ENOMEM; } - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2327,7 +2327,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, memcpy(data, resp->page_data + off, len); } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); return status; } @@ -2344,7 +2344,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, void *ctxt = NULL; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); @@ -2386,7 +2386,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, if (status) goto err_unlock; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(60000))) @@ -2405,7 +2405,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter, return status; err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2459,7 +2459,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter, struct be_mcc_wrb *wrb; int status; - 
mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2477,7 +2477,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2490,7 +2490,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, struct lancer_cmd_resp_read_object *resp; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2524,7 +2524,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, } err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2536,7 +2536,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_cmd_write_flashrom *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); @@ -2561,7 +2561,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, if (status) goto err_unlock; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(40000))) @@ -2572,7 +2572,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter, return status; err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -2583,7 +2583,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, struct be_mcc_wrb *wrb; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -2610,7 +2610,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, memcpy(flashed_crc, req->crc, 4); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3216,7 +3216,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_cmd_req_acpi_wol_magic_config *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3233,7 +3233,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3248,7 +3248,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3271,7 +3271,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, if (status) goto err_unlock; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->et_cmd_compl, msecs_to_jiffies(SET_LB_MODE_TIMEOUT))) @@ -3280,7 +3280,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, return status; err_unlock: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3297,7 +3297,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3323,7 +3323,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, if 
(status) goto err; - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); wait_for_completion(&adapter->et_cmd_compl); resp = embedded_payload(wrb); @@ -3331,7 +3331,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, return status; err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3347,7 +3347,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, CMD_SUBSYSTEM_LOWLEVEL)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3381,7 +3381,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3392,7 +3392,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, struct be_cmd_req_seeprom_read *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3408,7 +3408,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3423,7 +3423,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3468,7 +3468,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter) } dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3478,7 +3478,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) struct be_cmd_req_set_qos *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3498,7 +3498,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3610,7 +3610,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, struct be_cmd_req_get_fn_privileges *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3642,7 +3642,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3654,7 +3654,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, struct be_cmd_req_set_fn_privileges *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3674,7 +3674,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3706,7 +3706,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, return -ENOMEM; } - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3770,7 +3770,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, } out: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); 
dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, get_mac_list_cmd.va, get_mac_list_cmd.dma); return status; @@ -3830,7 +3830,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, if (!cmd.va) return -ENOMEM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3852,7 +3852,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, err: dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3888,7 +3888,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, CMD_SUBSYSTEM_COMMON)) return -EPERM; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3929,7 +3929,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -3943,7 +3943,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, int status; u16 vid; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -3990,7 +3990,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4189,7 +4189,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, struct be_cmd_req_set_ext_fat_caps *req; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4205,7 +4205,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4683,7 +4683,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) if (iface == 0xFFFFFFFF) return -1; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4700,7 +4700,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4734,7 +4734,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, struct be_cmd_resp_get_iface_list *resp; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4755,7 +4755,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, } err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4849,7 +4849,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) if (BEx_chip(adapter)) return 0; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4867,7 +4867,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) req->enable = 1; status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4940,7 +4940,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter, u32 link_config = 0; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -4968,7 
+4968,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter, status = be_mcc_notify_wait(adapter); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -4999,8 +4999,7 @@ int be_cmd_set_features(struct be_adapter *adapter) struct be_mcc_wrb *wrb; int status; - if (mutex_lock_interruptible(&adapter->mcc_lock)) - return -1; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -5038,7 +5037,7 @@ err: dev_info(&adapter->pdev->dev, "Adapter does not support HW error recovery\n"); - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } @@ -5052,7 +5051,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, struct be_cmd_resp_hdr *resp; int status; - mutex_lock(&adapter->mcc_lock); + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { @@ -5075,7 +5074,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); err: - mutex_unlock(&adapter->mcc_lock); + spin_unlock_bh(&adapter->mcc_lock); return status; } EXPORT_SYMBOL(be_roce_mcc_cmd); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index b0a85c9b952b..173625a10886 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5672,8 +5672,8 @@ static int be_drv_init(struct be_adapter *adapter) } mutex_init(&adapter->mbox_lock); - mutex_init(&adapter->mcc_lock); mutex_init(&adapter->rx_filter_lock); + spin_lock_init(&adapter->mcc_lock); spin_lock_init(&adapter->mcc_cq_lock); init_completion(&adapter->et_cmd_compl); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 19edb1ba6d66..bf49c07c8b51 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -123,6 +123,24 @@ static int enetc_ptp_parse(struct sk_buff *skb, u8 *udp, return 0; } +/** + * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame + * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located + * @count: Number of Tx buffer descriptors which need to be unmapped + * @i: Index of the last successfully mapped Tx buffer descriptor + */ +static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i) +{ + while (count--) { + struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; + + enetc_free_tx_frame(tx_ring, tx_swbd); + if (i == 0) + i = tx_ring->bd_count; + i--; + } +} + static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) { bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false; @@ -213,9 +231,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) } if (do_onestep_tstamp) { - u32 lo, hi, val; - u64 sec, nsec; + __be32 new_sec_l, new_nsec; + u32 lo, hi, nsec, val; + __be16 new_sec_h; u8 *data; + u64 sec; lo = enetc_rd_hot(hw, ENETC_SICTR0); hi = enetc_rd_hot(hw, ENETC_SICTR1); @@ -229,13 +249,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) /* Update originTimestamp field of Sync packet * - 48 bits seconds field * - 32 bits nanseconds field + * + * In addition, the UDP checksum needs to be updated + * by software after updating originTimestamp field, + * otherwise the hardware will calculate the wrong + * checksum when updating 
the correction field and + * update it to the packet. */ data = skb_mac_header(skb); - *(__be16 *)(data + offset2) = - htons((sec >> 32) & 0xffff); - *(__be32 *)(data + offset2 + 2) = - htonl(sec & 0xffffffff); - *(__be32 *)(data + offset2 + 6) = htonl(nsec); + new_sec_h = htons((sec >> 32) & 0xffff); + new_sec_l = htonl(sec & 0xffffffff); + new_nsec = htonl(nsec); + if (udp) { + struct udphdr *uh = udp_hdr(skb); + __be32 old_sec_l, old_nsec; + __be16 old_sec_h; + + old_sec_h = *(__be16 *)(data + offset2); + inet_proto_csum_replace2(&uh->check, skb, old_sec_h, + new_sec_h, false); + + old_sec_l = *(__be32 *)(data + offset2 + 2); + inet_proto_csum_replace4(&uh->check, skb, old_sec_l, + new_sec_l, false); + + old_nsec = *(__be32 *)(data + offset2 + 6); + inet_proto_csum_replace4(&uh->check, skb, old_nsec, + new_nsec, false); + } + + *(__be16 *)(data + offset2) = new_sec_h; + *(__be32 *)(data + offset2 + 2) = new_sec_l; + *(__be32 *)(data + offset2 + 6) = new_nsec; /* Configure single-step register */ val = ENETC_PM0_SINGLE_STEP_EN; @@ -306,25 +351,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb) dma_err: dev_err(tx_ring->dev, "DMA map error"); - do { - tx_swbd = &tx_ring->tx_swbd[i]; - enetc_free_tx_frame(tx_ring, tx_swbd); - if (i == 0) - i = tx_ring->bd_count; - i--; - } while (count--); + enetc_unwind_tx_frame(tx_ring, count, i); return 0; } -static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, - struct enetc_tx_swbd *tx_swbd, - union enetc_tx_bd *txbd, int *i, int hdr_len, - int data_len) +static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, + struct enetc_tx_swbd *tx_swbd, + union enetc_tx_bd *txbd, int *i, int hdr_len, + int data_len) { union enetc_tx_bd txbd_tmp; u8 flags = 0, e_flags = 0; dma_addr_t addr; + int count = 1; enetc_clear_tx_bd(&txbd_tmp); addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE; @@ -367,7 +407,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb, /* Write the BD */ txbd_tmp.ext.e_flags = e_flags; *txbd = txbd_tmp; + count++; } + + return count; } static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb, @@ -499,9 +542,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb /* compute the csum over the L4 header */ csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos); - enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len); + count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, + &i, hdr_len, data_len); bd_data_num = 0; - count++; while (data_len > 0) { int size; @@ -525,8 +568,13 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd, tso.data, size, size == data_len); - if (err) + if (err) { + if (i == 0) + i = tx_ring->bd_count; + i--; + goto err_map_data; + } data_len -= size; count++; @@ -555,13 +603,7 @@ err_map_data: dev_err(tx_ring->dev, "DMA map error"); err_chained_bd: - do { - tx_swbd = &tx_ring->tx_swbd[i]; - enetc_free_tx_frame(tx_ring, tx_swbd); - if (i == 0) - i = tx_ring->bd_count; - i--; - } while (count--); + enetc_unwind_tx_frame(tx_ring, count, i); return 0; } @@ -1494,6 +1536,16 @@ static void enetc_xdp_drop(struct enetc_bdr *rx_ring, int rx_ring_first, } } +static void enetc_bulk_flip_buff(struct enetc_bdr *rx_ring, int rx_ring_first, + int rx_ring_last) +{ + while (rx_ring_first != rx_ring_last) { + enetc_flip_rx_buff(rx_ring, + 
&rx_ring->rx_swbd[rx_ring_first]); + enetc_bdr_idx_inc(rx_ring, &rx_ring_first); + } +} + static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, struct napi_struct *napi, int work_limit, struct bpf_prog *prog) @@ -1581,7 +1633,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, enetc_xdp_drop(rx_ring, orig_i, i); tx_ring->stats.xdp_tx_drops++; } else { - tx_ring->stats.xdp_tx += xdp_tx_bd_cnt; + tx_ring->stats.xdp_tx++; rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt; xdp_tx_frm_cnt++; /* The XDP_TX enqueue was successful, so we @@ -1617,11 +1669,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring, enetc_xdp_drop(rx_ring, orig_i, i); rx_ring->stats.xdp_redirect_failures++; } else { - while (orig_i != i) { - enetc_flip_rx_buff(rx_ring, - &rx_ring->rx_swbd[orig_i]); - enetc_bdr_idx_inc(rx_ring, &orig_i); - } + enetc_bulk_flip_buff(rx_ring, orig_i, i); xdp_redirect_frm_cnt++; rx_ring->stats.xdp_redirect++; } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 0a5c3d27ed3b..4a513dba8f53 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -692,7 +692,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, txq->bd.cur = bdp; /* Trigger transmission start */ - writel(0, txq->bd.reg_desc_active); + if (!(fep->quirks & FEC_QUIRK_ERR007885) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active)) + writel(0, txq->bd.reg_desc_active); return 0; } @@ -818,6 +823,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len, total_len, data_left; struct bufdesc *bdp = txq->bd.cur; + struct bufdesc *tmp_bdp; + struct bufdesc_ex *ebdp; struct tso_t tso; unsigned int index = 0; int ret; @@ -891,7 +898,34 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, return 0; err_release: - /* TODO: Release all used data descriptors for TSO */ + /* Release all used data descriptors for TSO */ + tmp_bdp = txq->bd.cur; + + while (tmp_bdp != bdp) { + /* Unmap data buffers */ + if (tmp_bdp->cbd_bufaddr && + !IS_TSO_HEADER(txq, fec32_to_cpu(tmp_bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(tmp_bdp->cbd_bufaddr), + fec16_to_cpu(tmp_bdp->cbd_datlen), + DMA_TO_DEVICE); + + /* Clear standard buffer descriptor fields */ + tmp_bdp->cbd_sc = 0; + tmp_bdp->cbd_datlen = 0; + tmp_bdp->cbd_bufaddr = 0; + + /* Handle extended descriptor if enabled */ + if (fep->bufdesc_ex) { + ebdp = (struct bufdesc_ex *)tmp_bdp; + ebdp->cbd_esc = 0; + } + + tmp_bdp = fec_enet_get_nextdesc(tmp_bdp, &txq->bd); + } + + dev_kfree_skb_any(skb); + return ret; } @@ -3508,29 +3542,6 @@ fec_set_mac_address(struct net_device *ndev, void *p) return 0; } -#ifdef CONFIG_NET_POLL_CONTROLLER -/** - * fec_poll_controller - FEC Poll controller function - * @dev: The FEC network adapter - * - * Polled functionality used by netconsole and others in non interrupt mode - * - */ -static void fec_poll_controller(struct net_device *dev) -{ - int i; - struct fec_enet_private *fep = netdev_priv(dev); - - for (i = 0; i < FEC_IRQ_NUM; i++) { - if (fep->irq[i] > 0) { - disable_irq(fep->irq[i]); - fec_enet_interrupt(fep->irq[i], dev); - enable_irq(fep->irq[i]); - } - } -} -#endif - static inline void fec_enet_set_netdev_features(struct net_device *netdev, netdev_features_t features) { @@ -3604,9 +3615,6 @@ static const 
struct net_device_ops fec_netdev_ops = { .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, .ndo_eth_ioctl = fec_enet_ioctl, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = fec_poll_controller, -#endif .ndo_set_features = fec_set_features, }; diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c index 95f778cce98c..e4911a76f08e 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c @@ -93,7 +93,7 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of) goto out_free; } - snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start); bus->priv = priv; bus->parent = dev; diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c index 37d83ff5b30b..ade1d5041ae4 100644 --- a/drivers/net/ethernet/freescale/fec_ptp.c +++ b/drivers/net/ethernet/freescale/fec_ptp.c @@ -262,18 +262,21 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev) } /** - * fec_ptp_adjfreq - adjust ptp cycle frequency + * fec_ptp_adjfine - adjust ptp cycle frequency * @ptp: the ptp clock structure - * @ppb: parts per billion adjustment from base + * @scaled_ppm: scaled parts per million adjustment from base * * Adjust the frequency of the ptp cycle counter by the - * indicated ppb from the base frequency. + * indicated amount from the base frequency. + * + * Scaled parts per million is ppm with a 16-bit binary fractional field. * * Because ENET hardware frequency adjust is complex, * using software method to do that. */ -static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int fec_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); unsigned long flags; int neg_adj = 0; u32 i, tmp; @@ -588,7 +591,7 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx) fep->ptp_caps.n_per_out = 0; fep->ptp_caps.n_pins = 0; fep->ptp_caps.pps = 1; - fep->ptp_caps.adjfreq = fec_ptp_adjfreq; + fep->ptp_caps.adjfine = fec_ptp_adjfine; fep->ptp_caps.adjtime = fec_ptp_adjtime; fep->ptp_caps.gettime64 = fec_ptp_gettime; fep->ptp_caps.settime64 = fec_ptp_settime; diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 21de56345503..f74311273019 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -126,7 +126,7 @@ static int fs_mii_bitbang_init(struct mii_bus *bus, struct device_node *np) * we get is an int, and the odds of multiple bitbang mdio buses * is low enough that it's not worth going too crazy. 
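 *
 * (Editorial aside, not part of the original comment: res.start is a
 * resource_size_t, which can be wider than unsigned int. The "%pa"
 * specifier used in the replacement below takes a pointer to such a
 * value and prints it at its full width, whereas the old "%x" could
 * truncate the address and trigger format-string warnings when the
 * resource address is 64 bits wide.)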
*/ - snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start); + snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res.start); data = of_get_property(np, "fsl,mdio-pin", &len); if (!data || len != 4) diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 033f17cb96be..7e60139f8ac5 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -327,7 +327,9 @@ gve_get_ethtool_stats(struct net_device *netdev, */ data[i++] = 0; data[i++] = 0; - data[i++] = tx->dqo_tx.tail - tx->dqo_tx.head; + data[i++] = + (tx->dqo_tx.tail - tx->dqo_tx.head) & + tx->mask; } do { start = diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index d3f6ad586ba1..8771ccfc69b4 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -202,6 +202,10 @@ static int gve_napi_poll(struct napi_struct *napi, int budget) if (block->tx) reschedule |= gve_tx_poll(block, budget); + + if (!budget) + return 0; + if (block->rx) { work_done = gve_rx_poll(block, budget); reschedule |= work_done == budget; @@ -242,6 +246,9 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget) if (block->tx) reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true); + if (!budget) + return 0; + if (block->rx) { work_done = gve_rx_poll_dqo(block, budget); reschedule |= work_done == budget; diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c index 021bbf308d68..639eb6848c7d 100644 --- a/drivers/net/ethernet/google/gve/gve_rx.c +++ b/drivers/net/ethernet/google/gve/gve_rx.c @@ -778,10 +778,6 @@ int gve_rx_poll(struct gve_notify_block *block, int budget) feat = block->napi.dev->features; - /* If budget is 0, do all the work */ - if (budget == 0) - budget = INT_MAX; - if (budget > 0) work_done = gve_clean_rx_done(rx, budget, feat); diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c index 5e11b8236754..bf1ac0d1dc6f 100644 --- a/drivers/net/ethernet/google/gve/gve_tx.c +++ b/drivers/net/ethernet/google/gve/gve_tx.c @@ -725,10 +725,6 @@ bool gve_tx_poll(struct gve_notify_block *block, int budget) u32 nic_done; u32 to_do; - /* If budget is 0, do all the work */ - if (budget == 0) - budget = INT_MAX; - /* In TX path, it may try to clean completed pkts in order to xmit, * to avoid cleaning conflict, use spin_lock(), it yields better * concurrency between xmit/clean than netif's lock. 
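The PTP hunks above for liquidio, cxgb4 and fec all make the same conversion: the legacy .adjfreq callback, which took a plain parts-per-billion value, is replaced by .adjfine, which receives scaled parts per million (ppm with a 16-bit binary fractional field) and feeds it through scaled_ppm_to_ppb() before reusing the existing ppb-based logic. The self-contained user-space sketch below only illustrates that arithmetic; it is not the in-kernel helper from <linux/ptp_clock_kernel.h> (which additionally biases the value by one before scaling, for rounding), and the name scaled_ppm_to_ppb_demo is made up for the example.

/*
 * scaled_ppm is ppm with a 16-bit binary fraction, so:
 *
 *     ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13
 */
#include <stdio.h>
#include <stdint.h>

static long scaled_ppm_to_ppb_demo(long scaled_ppm)
{
	int64_t ppb = scaled_ppm;	/* widen before scaling */

	ppb *= 125;
	ppb >>= 13;			/* arithmetic shift keeps the sign */
	return (long)ppb;
}

int main(void)
{
	printf("%ld\n", scaled_ppm_to_ppb_demo(65536));		/*  1 ppm   ->  1000 ppb */
	printf("%ld\n", scaled_ppm_to_ppb_demo(-65536));	/* -1 ppm   -> -1000 ppb */
	printf("%ld\n", scaled_ppm_to_ppb_demo(32768));		/*  0.5 ppm ->   500 ppb */
	return 0;
}

This is why each driver's neg_adj/comp computation is left untouched: only the unit conversion at the top of the handler changes.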
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 9a63fbc69408..b25fb400f476 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -40,6 +40,21 @@ EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare); */ static DEFINE_MUTEX(hnae3_common_lock); +/* ensure the drivers being unloaded one by one */ +static DEFINE_MUTEX(hnae3_unload_lock); + +void hnae3_acquire_unload_lock(void) +{ + mutex_lock(&hnae3_unload_lock); +} +EXPORT_SYMBOL(hnae3_acquire_unload_lock); + +void hnae3_release_unload_lock(void) +{ + mutex_unlock(&hnae3_unload_lock); +} +EXPORT_SYMBOL(hnae3_release_unload_lock); + static bool hnae3_client_match(enum hnae3_client_type client_type) { if (client_type == HNAE3_CLIENT_KNIC || diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 60b8d61af07f..7f993b3f8038 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -929,4 +929,6 @@ int hnae3_register_client(struct hnae3_client *client); void hnae3_set_client_init_flag(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev, unsigned int inited); +void hnae3_acquire_unload_lock(void); +void hnae3_release_unload_lock(void); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index d2603cfc122c..430b3ec800a9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -60,7 +60,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { .name = "tm_qset", .cmd = HNAE3_DBG_CMD_TM_QSET, .dentry = HNS3_DBG_DENTRY_TM, - .buf_len = HNS3_DBG_READ_LEN, + .buf_len = HNS3_DBG_READ_LEN_1MB, .init = hns3_dbg_common_file_init, }, { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 0377a056aaec..9bcd03e1994f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -472,20 +472,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector, writel(mask_en, tqp_vector->mask_addr); } -static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector) +static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector) { napi_enable(&tqp_vector->napi); enable_irq(tqp_vector->vector_irq); - - /* enable vector */ - hns3_mask_vector_irq(tqp_vector, 1); } -static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector) +static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector) { - /* disable vector */ - hns3_mask_vector_irq(tqp_vector, 0); - disable_irq(tqp_vector->vector_irq); napi_disable(&tqp_vector->napi); cancel_work_sync(&tqp_vector->rx_group.dim.work); @@ -706,11 +700,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev) return 0; } +static void hns3_enable_irqs_and_tqps(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u16 i; + + for (i = 0; i < priv->vector_num; i++) + hns3_irq_enable(&priv->tqp_vector[i]); + + for (i = 0; i < priv->vector_num; i++) + hns3_mask_vector_irq(&priv->tqp_vector[i], 1); + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_enable(h->kinfo.tqp[i]); +} + +static void hns3_disable_irqs_and_tqps(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = priv->ae_handle; + u16 
i; + + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_disable(h->kinfo.tqp[i]); + + for (i = 0; i < priv->vector_num; i++) + hns3_mask_vector_irq(&priv->tqp_vector[i], 0); + + for (i = 0; i < priv->vector_num; i++) + hns3_irq_disable(&priv->tqp_vector[i]); +} + static int hns3_nic_net_up(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hnae3_handle *h = priv->ae_handle; - int i, j; int ret; ret = hns3_nic_reset_all_ring(h); @@ -719,23 +744,13 @@ static int hns3_nic_net_up(struct net_device *netdev) clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); - /* enable the vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_enable(&priv->tqp_vector[i]); - - /* enable rcb */ - for (j = 0; j < h->kinfo.num_tqps; j++) - hns3_tqp_enable(h->kinfo.tqp[j]); + hns3_enable_irqs_and_tqps(netdev); /* start the ae_dev */ ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; if (ret) { set_bit(HNS3_NIC_STATE_DOWN, &priv->state); - while (j--) - hns3_tqp_disable(h->kinfo.tqp[j]); - - for (j = i - 1; j >= 0; j--) - hns3_vector_disable(&priv->tqp_vector[j]); + hns3_disable_irqs_and_tqps(netdev); } return ret; @@ -822,17 +837,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h) static void hns3_nic_net_down(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); - struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops; - int i; - /* disable vectors */ - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - - /* disable rcb */ - for (i = 0; i < h->kinfo.num_tqps; i++) - hns3_tqp_disable(h->kinfo.tqp[i]); + hns3_disable_irqs_and_tqps(netdev); /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; @@ -5869,8 +5876,6 @@ int hns3_set_channels(struct net_device *netdev, void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) { struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - int i; if (!if_running) return; @@ -5881,11 +5886,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running) netif_carrier_off(ndev); netif_tx_disable(ndev); - for (i = 0; i < priv->vector_num; i++) - hns3_vector_disable(&priv->tqp_vector[i]); - - for (i = 0; i < h->kinfo.num_tqps; i++) - hns3_tqp_disable(h->kinfo.tqp[i]); + hns3_disable_irqs_and_tqps(ndev); /* delay ring buffer clearing to hns3_reset_notify_uninit_enet * during reset process, because driver may not be able @@ -5901,7 +5902,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running) { struct hns3_nic_priv *priv = netdev_priv(ndev); struct hnae3_handle *h = priv->ae_handle; - int i; if (!if_running) return; @@ -5917,11 +5917,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running) clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); - for (i = 0; i < priv->vector_num; i++) - hns3_vector_enable(&priv->tqp_vector[i]); - - for (i = 0; i < h->kinfo.num_tqps; i++) - hns3_tqp_enable(h->kinfo.tqp[i]); + hns3_enable_irqs_and_tqps(ndev); netif_tx_wake_all_queues(ndev); @@ -6007,9 +6003,11 @@ module_init(hns3_init_module); */ static void __exit hns3_exit_module(void) { + hnae3_acquire_unload_lock(); pci_unregister_driver(&hns3_driver); hnae3_unregister_client(&client); hns3_dbg_unregister_debugfs(); + hnae3_release_unload_lock(); } module_exit(hns3_exit_module); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 45bd5c79e4da..ed1b49a36016 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -13250,9 +13250,11 @@ static int __init hclge_init(void) static void __exit hclge_exit(void) { + hnae3_acquire_unload_lock(); hnae3_unregister_ae_algo_prepare(&ae_algo); hnae3_unregister_ae_algo(&ae_algo); destroy_workqueue(hclge_wq); + hnae3_release_unload_lock(); } module_init(hclge_init); module_exit(hclge_exit); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index 0f06f95b09bc..b7cf9fbf9718 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -452,6 +452,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev) ptp->info.settime64 = hclge_ptp_settime; ptp->info.n_alarm = 0; + + spin_lock_init(&ptp->lock); + ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; + ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; + ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; + hdev->ptp = ptp; + ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev); if (IS_ERR(ptp->clock)) { dev_err(&hdev->pdev->dev, @@ -463,12 +470,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev) return -ENODEV; } - spin_lock_init(&ptp->lock); - ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET; - ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE; - ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF; - hdev->ptp = ptp; - return 0; } @@ -496,7 +497,7 @@ int hclge_ptp_init(struct hclge_dev *hdev) ret = hclge_ptp_get_cycle(hdev); if (ret) - return ret; + goto out; } ret = hclge_ptp_int_en(hdev, true); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index aebb104f4c29..b11d38a6093f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1309,9 +1309,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev) rtnl_unlock(); } -static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); struct hclge_vf_to_pf_msg send_msg; hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN, @@ -1320,6 +1319,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0); } +static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + + ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable); + if (ret) + return ret; + + hdev->rxvtag_strip_en = enable; + return 0; +} + static int hclgevf_reset_tqp(struct hnae3_handle *handle) { #define HCLGEVF_RESET_ALL_QUEUE_DONE 1U @@ -2198,12 +2210,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) tc_valid, tc_size); } -static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) +static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev, + bool rxvtag_strip_en) { struct hnae3_handle *nic = &hdev->nic; int ret; - ret = hclgevf_en_hw_strip_rxvtag(nic, true); + ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en); if (ret) { dev_err(&hdev->pdev->dev, "failed to enable rx vlan offload, ret = %d\n", ret); @@ -2872,7 +2885,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) if (ret) return ret; - ret = 
hclgevf_init_vlan_config(hdev); + ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en); if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to initialize VLAN config\n", ret); @@ -2985,7 +2998,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } - ret = hclgevf_init_vlan_config(hdev); + ret = hclgevf_init_vlan_config(hdev, true); if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to initialize VLAN config\n", ret); @@ -3468,8 +3481,10 @@ static int __init hclgevf_init(void) static void __exit hclgevf_exit(void) { + hnae3_acquire_unload_lock(); hnae3_unregister_ae_algo(&ae_algovf); destroy_workqueue(hclgevf_wq); + hnae3_release_unload_lock(); } module_init(hclgevf_init); module_exit(hclgevf_exit); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index 976414d00e67..1f62ac062d04 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -253,6 +253,7 @@ struct hclgevf_dev { int *vector_irq; bool gro_en; + bool rxvtag_strip_en; unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 2d6906aba2a2..af6100e5d9b9 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -172,6 +172,7 @@ err_init_txq: hinic_sq_dbgfs_uninit(nic_dev); devm_kfree(&netdev->dev, nic_dev->txqs); + nic_dev->txqs = NULL; return err; } @@ -268,6 +269,7 @@ err_init_rxq: hinic_rq_dbgfs_uninit(nic_dev); devm_kfree(&netdev->dev, nic_dev->rxqs); + nic_dev->rxqs = NULL; return err; } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 6d17738c1c53..071dca86fc88 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -116,6 +116,7 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, struct ibmvnic_long_term_buff *ltb); static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter); static void flush_reset_queue(struct ibmvnic_adapter *adapter); +static void print_subcrq_error(struct device *dev, int rc, const char *func); struct ibmvnic_stat { char name[ETH_GSTRING_LEN]; @@ -2135,7 +2136,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, tx_buff = &tx_pool->tx_buff[index]; adapter->netdev->stats.tx_packets--; adapter->netdev->stats.tx_bytes -= tx_buff->skb->len; - adapter->tx_stats_buffers[queue_num].packets--; + adapter->tx_stats_buffers[queue_num].batched_packets--; adapter->tx_stats_buffers[queue_num].bytes -= tx_buff->skb->len; dev_kfree_skb_any(tx_buff->skb); @@ -2160,8 +2161,29 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter, } } +static int send_subcrq_direct(struct ibmvnic_adapter *adapter, + u64 remote_handle, u64 *entry) +{ + unsigned int ua = adapter->vdev->unit_address; + struct device *dev = &adapter->vdev->dev; + int rc; + + /* Make sure the hypervisor sees the complete request */ + dma_wmb(); + rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua, + cpu_to_be64(remote_handle), + cpu_to_be64(entry[0]), cpu_to_be64(entry[1]), + cpu_to_be64(entry[2]), cpu_to_be64(entry[3])); + + if (rc) + print_subcrq_error(dev, rc, __func__); + + return rc; +} + static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, - struct ibmvnic_sub_crq_queue *tx_scrq) + struct ibmvnic_sub_crq_queue *tx_scrq, + bool indirect) { struct 
ibmvnic_ind_xmit_queue *ind_bufp; u64 dma_addr; @@ -2176,12 +2198,18 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter, if (!entries) return 0; - rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); + + if (indirect) + rc = send_subcrq_indirect(adapter, handle, dma_addr, entries); + else + rc = send_subcrq_direct(adapter, handle, + (u64 *)ind_bufp->indir_arr); + if (rc) ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq); else ind_bufp->index = 0; - return 0; + return rc; } static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) @@ -2200,13 +2228,16 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) unsigned int tx_map_failed = 0; union sub_crq indir_arr[16]; unsigned int tx_dropped = 0; - unsigned int tx_packets = 0; + unsigned int tx_dpackets = 0; + unsigned int tx_bpackets = 0; unsigned int tx_bytes = 0; dma_addr_t data_dma_addr; struct netdev_queue *txq; unsigned long lpar_rc; + unsigned int skblen; union sub_crq tx_crq; unsigned int offset; + bool use_scrq_send_direct = false; int num_entries = 1; unsigned char *dst; int bufidx = 0; @@ -2234,7 +2265,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_dropped++; tx_send_failed++; ret = NETDEV_TX_OK; - ibmvnic_tx_scrq_flush(adapter, tx_scrq); + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true); + if (lpar_rc != H_SUCCESS) + goto tx_err; goto out; } @@ -2249,8 +2282,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) dev_kfree_skb_any(skb); tx_send_failed++; tx_dropped++; - ibmvnic_tx_scrq_flush(adapter, tx_scrq); ret = NETDEV_TX_OK; + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true); + if (lpar_rc != H_SUCCESS) + goto tx_err; goto out; } @@ -2262,6 +2297,20 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) memset(dst, 0, tx_pool->buf_size); data_dma_addr = ltb->addr + offset; + /* if we are going to send_subcrq_direct this then we need to + * update the checksum before copying the data into ltb. Essentially + * these packets force disable CSO so that we can guarantee that + * FW does not need header info and we can send direct. 
Also, vnic + * server must be able to xmit standard packets without header data + */ + if (*hdrs == 0 && !skb_is_gso(skb) && + !ind_bufp->index && !netdev_xmit_more()) { + use_scrq_send_direct = true; + if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_help(skb)) + use_scrq_send_direct = false; + } + if (skb_shinfo(skb)->nr_frags) { int cur, i; @@ -2303,6 +2352,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_buff->skb = skb; tx_buff->index = bufidx; tx_buff->pool_index = queue_num; + skblen = skb->len; memset(&tx_crq, 0, sizeof(tx_crq)); tx_crq.v1.first = IBMVNIC_CRQ_CMD; @@ -2346,6 +2396,19 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_crq.v1.flags1 |= IBMVNIC_TX_LSO; tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); hdrs += 2; + } else if (use_scrq_send_direct) { + /* See above comment, CSO disabled with direct xmit */ + tx_crq.v1.flags1 &= ~(IBMVNIC_TX_CHKSUM_OFFLOAD); + ind_bufp->index = 1; + tx_buff->num_entries = 1; + netdev_tx_sent_queue(txq, skb->len); + ind_bufp->indir_arr[0] = tx_crq; + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, false); + if (lpar_rc != H_SUCCESS) + goto tx_err; + + tx_dpackets++; + goto early_exit; } if ((*hdrs >> 7) & 1) @@ -2355,7 +2418,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_buff->num_entries = num_entries; /* flush buffer if current entry can not fit */ if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) { - lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true); if (lpar_rc != H_SUCCESS) goto tx_flush_err; } @@ -2363,23 +2426,26 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) indir_arr[0] = tx_crq; memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0], num_entries * sizeof(struct ibmvnic_generic_scrq)); + ind_bufp->index += num_entries; if (__netdev_tx_sent_queue(txq, skb->len, netdev_xmit_more() && ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) { - lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq); + lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq, true); if (lpar_rc != H_SUCCESS) goto tx_err; } + tx_bpackets++; + +early_exit: if (atomic_add_return(num_entries, &tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) { netdev_dbg(netdev, "Stopping queue %d\n", queue_num); netif_stop_subqueue(netdev, queue_num); } - tx_packets++; - tx_bytes += skb->len; + tx_bytes += skblen; txq_trans_cond_update(txq); ret = NETDEV_TX_OK; goto out; @@ -2408,10 +2474,11 @@ out: rcu_read_unlock(); netdev->stats.tx_dropped += tx_dropped; netdev->stats.tx_bytes += tx_bytes; - netdev->stats.tx_packets += tx_packets; + netdev->stats.tx_packets += tx_bpackets + tx_dpackets; adapter->tx_send_failed += tx_send_failed; adapter->tx_map_failed += tx_map_failed; - adapter->tx_stats_buffers[queue_num].packets += tx_packets; + adapter->tx_stats_buffers[queue_num].batched_packets += tx_bpackets; + adapter->tx_stats_buffers[queue_num].direct_packets += tx_dpackets; adapter->tx_stats_buffers[queue_num].bytes += tx_bytes; adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped; @@ -3577,7 +3644,10 @@ static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data) memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN); for (i = 0; i < adapter->req_tx_queues; i++) { - snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i); + snprintf(data, ETH_GSTRING_LEN, "tx%d_batched_packets", i); + data += ETH_GSTRING_LEN; + + snprintf(data, 
ETH_GSTRING_LEN, "tx%d_direct_packets", i); data += ETH_GSTRING_LEN; snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i); @@ -3642,7 +3712,9 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev, (adapter, ibmvnic_stats[i].offset)); for (j = 0; j < adapter->req_tx_queues; j++) { - data[i] = adapter->tx_stats_buffers[j].packets; + data[i] = adapter->tx_stats_buffers[j].batched_packets; + i++; + data[i] = adapter->tx_stats_buffers[j].direct_packets; i++; data[i] = adapter->tx_stats_buffers[j].bytes; i++; diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index e5c6ff3d0c47..f923cdab03f5 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -213,7 +213,8 @@ struct ibmvnic_statistics { #define NUM_TX_STATS 3 struct ibmvnic_tx_queue_stats { - u64 packets; + u64 batched_packets; + u64 direct_packets; u64 bytes; u64 dropped_packets; }; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 53b9fe35d803..7119bce4c091 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -830,6 +830,11 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, f->state = IAVF_VLAN_ADD; adapter->num_vlan_filters++; iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER); + } else if (f->state == IAVF_VLAN_REMOVE) { + /* IAVF_VLAN_REMOVE means that VLAN wasn't yet removed. + * We can safely only change the state here. + */ + f->state = IAVF_VLAN_ACTIVE; } clearout: @@ -850,8 +855,18 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan) f = iavf_find_vlan(adapter, vlan); if (f) { - f->state = IAVF_VLAN_REMOVE; - iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER); + /* IAVF_ADD_VLAN means that VLAN wasn't even added yet. + * Remove it from the list. 
+ */ + if (f->state == IAVF_VLAN_ADD) { + list_del(&f->list); + kfree(f); + adapter->num_vlan_filters--; + } else { + f->state = IAVF_VLAN_REMOVE; + iavf_schedule_aq_request(adapter, + IAVF_FLAG_AQ_DEL_VLAN_FILTER); + } } spin_unlock_bh(&adapter->mac_vlan_list_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index fba178e07600..79074f041e39 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -510,7 +510,7 @@ void ice_init_arfs(struct ice_vsi *vsi) struct hlist_head *arfs_fltr_list; unsigned int i; - if (!vsi || vsi->type != ICE_VSI_PF) + if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi)) return; arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index a163e7717a53..1f62d1183156 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3373,8 +3373,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; - if (q_vector->rx.rx_ring && q_vector->tx.tx_ring) - combined++; + combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx); } return combined; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h index 4109aa3b2fcd..87ce20540f57 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -359,9 +359,9 @@ const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD] = { /* rx_desk_rsgb_par */ 644531250, /* 644.53125 MHz Reed Solomon gearbox */ /* tx_desk_rsgb_pcs */ - 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + 390625000, /* 390.625 MHz Reed Solomon gearbox */ /* rx_desk_rsgb_pcs */ - 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + 390625000, /* 390.625 MHz Reed Solomon gearbox */ /* tx_fixed_delay */ 1620, /* pmd_adj_divisor */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 42d8e5e771b7..fa9d928081d6 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -3551,7 +3551,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) } ice_vfhw_mac_add(vf, &al->list[i]); - vf->num_mac++; break; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index bff3e9662a8f..a9df95088df3 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -1811,6 +1811,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) pf = vf->pf; dev = ice_pf_to_dev(pf); vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err_exit; + } #define ICE_VF_MAX_FDIR_FILTERS 128 if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) || diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 2e2caf559d00..d53076210f3a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -665,6 +665,10 @@ static int __init igb_init_module(void) dca_register_notify(&dca_notifier); #endif ret = pci_register_driver(&igb_driver); +#ifdef CONFIG_IGB_DCA + if (ret) + 
dca_unregister_notify(&dca_notifier); +#endif return ret; } diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index 8187a658dcbd..17a7e3c875cc 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -574,6 +574,7 @@ #define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ #define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ #define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ +#define IGC_PTM_STAT_ALL GENMASK(5, 0) /* Used to clear all status */ /* PCIe PTM Cycle Control */ #define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 6ae2d0b723c8..082f78beeb4e 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6725,6 +6725,7 @@ static int igc_probe(struct pci_dev *pdev, err_register: igc_release_hw_control(adapter); + igc_ptp_stop(adapter); err_eeprom: if (!igc_check_reset_block(hw)) igc_reset_phy(hw); diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 14cd7f995280..8731e8f23494 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -844,45 +844,60 @@ static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) } } +static void igc_ptm_trigger(struct igc_hw *hw) +{ + u32 ctrl; + + /* To "manually" start the PTM cycle we need to set the + * trigger (TRIG) bit + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + /* Perform flush after write to CTRL register otherwise + * transaction may not start + */ + wrfl(); +} + +static void igc_ptm_reset(struct igc_hw *hw) +{ + u32 ctrl; + + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + /* Write to clear all status */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_ALL); +} + static int igc_phc_get_syncdevicetime(ktime_t *device, struct system_counterval_t *system, void *ctx) { - u32 stat, t2_curr_h, t2_curr_l, ctrl; struct igc_adapter *adapter = ctx; struct igc_hw *hw = &adapter->hw; + u32 stat, t2_curr_h, t2_curr_l; int err, count = 100; ktime_t t1, t2_curr; - /* Get a snapshot of system clocks to use as historic value. */ - ktime_get_snapshot(&adapter->snapshot); - + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ do { - /* Doing this in a loop because in the event of a - * badly timed (ha!) system clock adjustment, we may - * get PTM errors from the PCI root, but these errors - * are transitory. Repeating the process returns valid - * data eventually. - */ + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); - /* To "manually" start the PTM cycle we need to clear and - * then set again the TRIG bit. - */ - ctrl = rd32(IGC_PTM_CTRL); - ctrl &= ~IGC_PTM_CTRL_TRIG; - wr32(IGC_PTM_CTRL, ctrl); - ctrl |= IGC_PTM_CTRL_TRIG; - wr32(IGC_PTM_CTRL, ctrl); - - /* The cycle only starts "for real" when software notifies - * that it has read the registers, this is done by setting - * VALID bit. 
- */ - wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + igc_ptm_trigger(hw); err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, stat, IGC_PTM_STAT_SLEEP, IGC_PTM_STAT_TIMEOUT); + igc_ptm_reset(hw); + if (err < 0) { netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); return err; @@ -891,15 +906,7 @@ static int igc_phc_get_syncdevicetime(ktime_t *device, if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) break; - if (stat & ~IGC_PTM_STAT_VALID) { - /* An error occurred, log it. */ - igc_ptm_log_error(adapter, stat); - /* The STAT register is write-1-to-clear (W1C), - * so write the previous error status to clear it. - */ - wr32(IGC_PTM_STAT, stat); - continue; - } + igc_ptm_log_error(adapter, stat); } while (--count); if (!count) { @@ -1063,8 +1070,12 @@ void igc_ptp_suspend(struct igc_adapter *adapter) **/ void igc_ptp_stop(struct igc_adapter *adapter) { + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + igc_ptp_suspend(adapter); + adapter->ptp_flags &= ~IGC_PTP_ENABLED; if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); netdev_info(adapter->netdev, "PHC removed\n"); @@ -1081,10 +1092,13 @@ void igc_ptp_stop(struct igc_adapter *adapter) void igc_ptp_reset(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; - u32 cycle_ctrl, ctrl; + u32 cycle_ctrl, ctrl, stat; unsigned long flags; u32 timadj; + if (!(adapter->ptp_flags & IGC_PTP_ENABLED)) + return; + /* reset the tstamp_config */ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); @@ -1116,14 +1130,19 @@ void igc_ptp_reset(struct igc_adapter *adapter) ctrl = IGC_PTM_CTRL_EN | IGC_PTM_CTRL_START_NOW | IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | - IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | - IGC_PTM_CTRL_TRIG; + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT); wr32(IGC_PTM_CTRL, ctrl); /* Force the first cycle to run. */ - wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + igc_ptm_trigger(hw); + if (readx_poll_timeout_atomic(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT)) + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + + igc_ptm_reset(hw); break; default: /* No work to do. */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index 4b531e8ae38a..afe8a64dbdd4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -195,6 +195,8 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg); dev_err(&adapter->pdev->dev, format, ## arg) #define e_dev_notice(format, arg...) \ dev_notice(&adapter->pdev->dev, format, ## arg) +#define e_dbg(msglvl, format, arg...) \ + netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) #define e_info(msglvl, format, arg...) \ netif_info(adapter, msglvl, adapter->netdev, format, ## arg) #define e_err(msglvl, format, arg...) 
\ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 198ab9d97618..9132e73e4182 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1051,7 +1051,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, break; } - e_info(drv, "VF %d requested invalid api version %u\n", vf, api); + e_dbg(drv, "VF %d requested unsupported api version %u\n", vf, api); return -1; } diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index 9984ebc62d78..74c121d7abc9 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -623,7 +623,6 @@ void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter) switch (adapter->hw.api_version) { case ixgbe_mbox_api_14: - case ixgbe_mbox_api_15: break; default: return; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 8941f69d93e9..b9dda48326d5 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2707,9 +2707,15 @@ static struct platform_device *port_platdev[3]; static void mv643xx_eth_shared_of_remove(void) { + struct mv643xx_eth_platform_data *pd; int n; for (n = 0; n < 3; n++) { + if (!port_platdev[n]) + continue; + pd = dev_get_platdata(&port_platdev[n]->dev); + if (pd) + of_node_put(pd->phy_node); platform_device_del(port_platdev[n]); port_platdev[n] = NULL; } @@ -2770,8 +2776,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, } ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num); - if (!ppdev) - return -ENOMEM; + if (!ppdev) { + ret = -ENOMEM; + goto put_err; + } ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); ppdev->dev.of_node = pnp; @@ -2793,6 +2801,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev, port_err: platform_device_put(ppdev); +put_err: + of_node_put(ppd.phy_node); return ret; } diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h index 522d65acfc47..50dc2fcb8f7b 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h @@ -1108,6 +1108,9 @@ struct mvpp2 { /* Spinlocks for CM3 shared memory configuration */ spinlock_t mss_spinlock; + + /* Spinlock for shared PRS parser memory and shadow table */ + spinlock_t prs_spinlock; }; struct mvpp2_pcpu_stats { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 40aeaa7bd739..d2757cc11613 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = { MVPP2_PRS_RI_VLAN_MASK), /* Non IP flow, with vlan tag */ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG, - MVPP22_CLS_HEK_OPT_VLAN, + MVPP22_CLS_HEK_TAGGED, 0, 0), }; diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index bbcdab562513..ec69bb90f574 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -7583,8 +7583,9 @@ static int mvpp2_probe(struct platform_device *pdev) if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23) priv->hw_version = MVPP23; - /* Init mss lock */ + /* Init locks for shared packet processor resources */ 
spin_lock_init(&priv->mss_spinlock); + spin_lock_init(&priv->prs_spinlock); /* Initialize network controller */ err = mvpp2_init(pdev, priv); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index 9af22f497a40..93e978bdf303 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -23,6 +23,8 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) { int i; + lockdep_assert_held(&priv->prs_spinlock); + if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; @@ -43,11 +45,13 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) } /* Initialize tcam entry from hw */ -int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, - int tid) +static int __mvpp2_prs_init_from_hw(struct mvpp2 *priv, + struct mvpp2_prs_entry *pe, int tid) { int i; + lockdep_assert_held(&priv->prs_spinlock); + if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1) return -EINVAL; @@ -73,6 +77,18 @@ int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, return 0; } +int mvpp2_prs_init_from_hw(struct mvpp2 *priv, struct mvpp2_prs_entry *pe, + int tid) +{ + int err; + + spin_lock_bh(&priv->prs_spinlock); + err = __mvpp2_prs_init_from_hw(priv, pe, tid); + spin_unlock_bh(&priv->prs_spinlock); + + return err; +} + /* Invalidate tcam hw entry */ static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index) { @@ -374,7 +390,7 @@ static int mvpp2_prs_flow_find(struct mvpp2 *priv, int flow) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); bits = mvpp2_prs_sram_ai_get(&pe); /* Sram store classification lookup ID in AI bits [5:0] */ @@ -441,7 +457,7 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); + __mvpp2_prs_init_from_hw(priv, &pe, MVPP2_PE_DROP_ALL); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -469,14 +485,17 @@ static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add) } /* Set port to unicast or multicast promiscuous mode */ -void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, - enum mvpp2_prs_l2_cast l2_cast, bool add) +static void __mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, + bool add) { struct mvpp2_prs_entry pe; unsigned char cast_match; unsigned int ri; int tid; + lockdep_assert_held(&priv->prs_spinlock); + if (l2_cast == MVPP2_PRS_L2_UNI_CAST) { cast_match = MVPP2_PRS_UCAST_VAL; tid = MVPP2_PE_MAC_UC_PROMISCUOUS; @@ -489,7 +508,7 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, /* promiscuous mode - Accept unknown unicast or multicast packets */ if (priv->prs_shadow[tid].valid) { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { memset(&pe, 0, sizeof(pe)); mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -522,6 +541,14 @@ void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, mvpp2_prs_hw_write(priv, &pe); } +void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, + enum mvpp2_prs_l2_cast l2_cast, bool add) +{ + spin_lock_bh(&priv->prs_spinlock); + __mvpp2_prs_mac_promisc_set(priv, port, l2_cast, add); + spin_unlock_bh(&priv->prs_spinlock); +} + /* Set entry for dsa packets */ static 
void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, bool tagged, bool extend) @@ -539,7 +566,7 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -610,7 +637,7 @@ static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port, if (priv->prs_shadow[tid].valid) { /* Entry exist - update port only */ - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } else { /* Entry doesn't exist - create new */ memset(&pe, 0, sizeof(pe)); @@ -673,7 +700,7 @@ static int mvpp2_prs_vlan_find(struct mvpp2 *priv, unsigned short tpid, int ai) priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid); if (!match) continue; @@ -726,7 +753,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) @@ -760,7 +787,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai, mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ mvpp2_prs_tcam_port_map_set(&pe, port_map); @@ -800,7 +827,7 @@ static int mvpp2_prs_double_vlan_find(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); match = mvpp2_prs_tcam_data_cmp(&pe, 0, tpid1) && mvpp2_prs_tcam_data_cmp(&pe, 4, tpid2); @@ -849,7 +876,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid_aux); + __mvpp2_prs_init_from_hw(priv, &pe, tid_aux); ri_bits = mvpp2_prs_sram_ri_get(&pe); ri_bits &= MVPP2_PRS_RI_VLAN_MASK; if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE || @@ -880,7 +907,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1, mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Update ports' mask */ @@ -1213,8 +1240,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) /* Create dummy entries for drop all and promiscuous modes */ mvpp2_prs_drop_fc(priv); mvpp2_prs_mac_drop_all_set(priv, 0, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); - mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false); + __mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false); } /* Set default entries for various types of dsa packets */ @@ -1533,12 +1560,6 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv) struct mvpp2_prs_entry pe; int err; - priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), - MVPP2_PRS_DBL_VLANS_MAX, - GFP_KERNEL); - if (!priv->prs_double_vlans) - 
return -ENOMEM; - /* Double VLAN: 0x88A8, 0x8100 */ err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021AD, ETH_P_8021Q, MVPP2_PRS_PORT_MASK); @@ -1941,7 +1962,7 @@ static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask) port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID) continue; - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]); mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]); @@ -1970,6 +1991,8 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + /* Scan TCAM and see if entry with this already exist */ tid = mvpp2_prs_vid_range_find(port, vid, mask); @@ -1988,8 +2011,10 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) MVPP2_PRS_VLAN_FILT_MAX_ENTRY); /* There isn't room for a new VID filter */ - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID); pe.index = tid; @@ -1997,7 +2022,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } /* Enable the current port */ @@ -2019,6 +2044,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid) mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, &pe); + spin_unlock_bh(&priv->prs_spinlock); return 0; } @@ -2028,15 +2054,16 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid) struct mvpp2 *priv = port->priv; int tid; - /* Scan TCAM and see if entry with this already exist */ + spin_lock_bh(&priv->prs_spinlock); + + /* Invalidate TCAM entry with this , if it exists */ tid = mvpp2_prs_vid_range_find(port, vid, 0xfff); + if (tid >= 0) { + mvpp2_prs_hw_inv(priv, tid); + priv->prs_shadow[tid].valid = false; + } - /* No such entry */ - if (tid < 0) - return; - - mvpp2_prs_hw_inv(priv, tid); - priv->prs_shadow[tid].valid = false; + spin_unlock_bh(&priv->prs_spinlock); } /* Remove all existing VID filters on this port */ @@ -2045,6 +2072,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) struct mvpp2 *priv = port->priv; int tid; + spin_lock_bh(&priv->prs_spinlock); + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id); tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) { if (priv->prs_shadow[tid].valid) { @@ -2052,6 +2081,8 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port) priv->prs_shadow[tid].valid = false; } } + + spin_unlock_bh(&priv->prs_spinlock); } /* Remove VID filering entry for this port */ @@ -2060,10 +2091,14 @@ void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port) unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id); struct mvpp2 *priv = port->priv; + spin_lock_bh(&priv->prs_spinlock); + /* Invalidate the guard entry */ mvpp2_prs_hw_inv(priv, tid); priv->prs_shadow[tid].valid = false; + + spin_unlock_bh(&priv->prs_spinlock); } /* Add guard entry that drops packets when no VID is matched on this port */ @@ -2079,6 +2114,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + pe.index = tid; reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id)); @@ -2111,6 +2148,8 @@ void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID); mvpp2_prs_hw_write(priv, 
&pe); + + spin_unlock_bh(&priv->prs_spinlock); } /* Parser default initialization */ @@ -2118,6 +2157,20 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) { int err, index, i; + priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, + sizeof(*priv->prs_shadow), + GFP_KERNEL); + if (!priv->prs_shadow) + return -ENOMEM; + + priv->prs_double_vlans = devm_kcalloc(&pdev->dev, sizeof(bool), + MVPP2_PRS_DBL_VLANS_MAX, + GFP_KERNEL); + if (!priv->prs_double_vlans) + return -ENOMEM; + + spin_lock_bh(&priv->prs_spinlock); + /* Enable tcam table */ mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); @@ -2136,12 +2189,6 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) mvpp2_prs_hw_inv(priv, index); - priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE, - sizeof(*priv->prs_shadow), - GFP_KERNEL); - if (!priv->prs_shadow) - return -ENOMEM; - /* Always start from lookup = 0 */ for (index = 0; index < MVPP2_MAX_PORTS; index++) mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, @@ -2158,26 +2205,13 @@ int mvpp2_prs_default_init(struct platform_device *pdev, struct mvpp2 *priv) mvpp2_prs_vid_init(priv); err = mvpp2_prs_etype_init(priv); - if (err) - return err; + err = err ? : mvpp2_prs_vlan_init(pdev, priv); + err = err ? : mvpp2_prs_pppoe_init(priv); + err = err ? : mvpp2_prs_ip6_init(priv); + err = err ? : mvpp2_prs_ip4_init(priv); - err = mvpp2_prs_vlan_init(pdev, priv); - if (err) - return err; - - err = mvpp2_prs_pppoe_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip6_init(priv); - if (err) - return err; - - err = mvpp2_prs_ip4_init(priv); - if (err) - return err; - - return 0; + spin_unlock_bh(&priv->prs_spinlock); + return err; } /* Compare MAC DA with tcam entry data */ @@ -2217,7 +2251,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, (priv->prs_shadow[tid].udf != udf_type)) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); entry_pmap = mvpp2_prs_tcam_port_map_get(&pe); if (mvpp2_prs_mac_range_equals(&pe, da, mask) && @@ -2229,7 +2263,8 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, } /* Update parser's mac da entry */ -int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +static int __mvpp2_prs_mac_da_accept(struct mvpp2_port *port, + const u8 *da, bool add) { unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; struct mvpp2 *priv = port->priv; @@ -2261,7 +2296,7 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) /* Mask all ports */ mvpp2_prs_tcam_port_map_set(&pe, 0); } else { - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC); @@ -2317,6 +2352,17 @@ int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) return 0; } +int mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const u8 *da, bool add) +{ + int err; + + spin_lock_bh(&port->priv->prs_spinlock); + err = __mvpp2_prs_mac_da_accept(port, da, add); + spin_unlock_bh(&port->priv->prs_spinlock); + + return err; +} + int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da) { struct mvpp2_port *port = netdev_priv(dev); @@ -2345,6 +2391,8 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) unsigned long pmap; int index, tid; + spin_lock_bh(&priv->prs_spinlock); + for (tid = 
MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END; tid++) { unsigned char da[ETH_ALEN], da_mask[ETH_ALEN]; @@ -2354,7 +2402,7 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) continue; - mvpp2_prs_init_from_hw(priv, &pe, tid); + __mvpp2_prs_init_from_hw(priv, &pe, tid); pmap = mvpp2_prs_tcam_port_map_get(&pe); @@ -2375,14 +2423,17 @@ void mvpp2_prs_mac_del_all(struct mvpp2_port *port) continue; /* Remove entry from TCAM */ - mvpp2_prs_mac_da_accept(port, da, false); + __mvpp2_prs_mac_da_accept(port, da, false); } + + spin_unlock_bh(&priv->prs_spinlock); } int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) { switch (type) { case MVPP2_TAG_TYPE_EDSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to EDSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); @@ -2393,9 +2444,11 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + spin_unlock_bh(&priv->prs_spinlock); break; case MVPP2_TAG_TYPE_DSA: + spin_lock_bh(&priv->prs_spinlock); /* Add port to DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, true, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2406,10 +2459,12 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break; case MVPP2_TAG_TYPE_MH: case MVPP2_TAG_TYPE_NONE: + spin_lock_bh(&priv->prs_spinlock); /* Remove port form EDSA and DSA entries */ mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); @@ -2419,6 +2474,7 @@ int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type) MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); mvpp2_prs_dsa_tag_set(priv, port, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + spin_unlock_bh(&priv->prs_spinlock); break; default: @@ -2437,11 +2493,15 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&priv->prs_spinlock); + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&priv->prs_spinlock); return tid; + } pe.index = tid; @@ -2461,6 +2521,7 @@ int mvpp2_prs_add_flow(struct mvpp2 *priv, int flow, u32 ri, u32 ri_mask) mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); mvpp2_prs_hw_write(priv, &pe); + spin_unlock_bh(&priv->prs_spinlock); return 0; } @@ -2472,6 +2533,8 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) memset(&pe, 0, sizeof(pe)); + spin_lock_bh(&port->priv->prs_spinlock); + tid = mvpp2_prs_flow_find(port->priv, port->id); /* Such entry not exist */ @@ -2480,8 +2543,10 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) tid = mvpp2_prs_tcam_first_free(port->priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); - if (tid < 0) + if (tid < 0) { + spin_unlock_bh(&port->priv->prs_spinlock); return tid; + } pe.index = tid; @@ -2492,13 +2557,14 @@ int mvpp2_prs_def_flow(struct mvpp2_port *port) /* Update shadow table */ mvpp2_prs_shadow_set(port->priv, pe.index, MVPP2_PRS_LU_FLOWS); } else { - mvpp2_prs_init_from_hw(port->priv, &pe, tid); + __mvpp2_prs_init_from_hw(port->priv, &pe, tid); } mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS); mvpp2_prs_tcam_port_map_set(&pe, (1 << port->id)); mvpp2_prs_hw_write(port->priv, &pe); + spin_unlock_bh(&port->priv->prs_spinlock); return 0; } 
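[Editor's aside, not part of the patch.] The shape of the locking change in the mvpp2 parser hunks above is the usual wrapper/assert split: the exported helpers take the new prs_spinlock with spin_lock_bh(), while the __-prefixed internals only lockdep_assert_held() so that callers already holding the lock can reuse them without deadlocking. A minimal sketch of that pattern follows; my_priv and the shared_table_*() helpers are hypothetical names for illustration only, not symbols from the mvpp2 driver.

#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Illustrative only: my_priv and shared_table_*() are hypothetical,
 * not mvpp2 symbols.
 */
struct my_priv {
	spinlock_t tbl_lock;	/* protects tbl[] and any shadow state */
	u32 tbl[64];
};

static void shared_table_init(struct my_priv *p)
{
	spin_lock_init(&p->tbl_lock);
}

/* Internal variant: the caller must already hold tbl_lock. */
static void __shared_table_write(struct my_priv *p, int idx, u32 val)
{
	lockdep_assert_held(&p->tbl_lock);
	p->tbl[idx] = val;
}

/* Exported variant: takes the lock itself, BH-disabled to match the
 * spin_lock_bh() calls used in the hunks above.
 */
static void shared_table_write(struct my_priv *p, int idx, u32 val)
{
	spin_lock_bh(&p->tbl_lock);
	__shared_table_write(p, idx, val);
	spin_unlock_bh(&p->tbl_lock);
}

Any new parser helper then either takes the lock itself or gains a __ variant, which is the same split the hunks above apply to mvpp2_prs_init_from_hw(), mvpp2_prs_mac_promisc_set() and mvpp2_prs_mac_da_accept().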
@@ -2509,11 +2575,14 @@ int mvpp2_prs_hits(struct mvpp2 *priv, int index) if (index > MVPP2_PRS_TCAM_SRAM_SIZE) return -EINVAL; + spin_lock_bh(&priv->prs_spinlock); + mvpp2_write(priv, MVPP2_PRS_TCAM_HIT_IDX_REG, index); val = mvpp2_read(priv, MVPP2_PRS_TCAM_HIT_CNT_REG); val &= MVPP2_PRS_TCAM_HIT_CNT_MASK; + spin_unlock_bh(&priv->prs_spinlock); return val; } diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index 993ac180a5db..a32d85d6f599 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -32,6 +32,7 @@ config OCTEONTX2_PF tristate "Marvell OcteonTX2 NIC Physical Function driver" select OCTEONTX2_MBOX select NET_DEVLINK + select PAGE_POOL depends on (64BIT && COMPILE_TEST) || ARM64 select DIMLIB depends on PCI diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index 6b4792a942d8..d8001bfd39a1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -350,7 +350,7 @@ u8 rpm_get_lmac_type(void *rpmd, int lmac_id) int err; req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req); - err = cgx_fwi_cmd_generic(req, &resp, rpm, 0); + err = cgx_fwi_cmd_generic(req, &resp, rpm, lmac_id); if (!err) return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp); return err; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index c7829265eade..d9c68f8166af 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2562,7 +2562,7 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr); rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr); - vfs -= 64; + vfs = 64; } intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index f9faa5b23bb9..5cd45846237e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -13,19 +13,26 @@ /* RVU LMTST */ #define LMT_TBL_OP_READ 0 #define LMT_TBL_OP_WRITE 1 -#define LMT_MAP_TABLE_SIZE (128 * 1024) #define LMT_MAPTBL_ENTRY_SIZE 16 +#define LMT_MAX_VFS 256 + +#define LMT_MAP_ENTRY_ENA BIT_ULL(20) +#define LMT_MAP_ENTRY_LINES GENMASK_ULL(18, 16) /* Function to perform operations (read/write) on lmtst map table */ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, int lmt_tbl_op) { void __iomem *lmt_map_base; - u64 tbl_base; + u64 tbl_base, cfg; + int pfs, vfs; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + cfg = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + vfs = 1 << (cfg & 0xF); + pfs = 1 << ((cfg >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + lmt_map_base = ioremap_wc(tbl_base, pfs * vfs * LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); return -ENOMEM; @@ -35,6 +42,13 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, *val = readq(lmt_map_base + index); } else { writeq((*val), (lmt_map_base + index)); + + cfg = FIELD_PREP(LMT_MAP_ENTRY_ENA, 0x1); + /* 2048 LMTLINES */ + cfg |= FIELD_PREP(LMT_MAP_ENTRY_LINES, 0x6); + + writeq(cfg, (lmt_map_base + (index + 8))); + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S * 
changes effective. Write 1 for flush and read is being used as a * barrier and sets up a data dependency. Write to 0 after a write @@ -52,7 +66,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, #define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { - return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + return ((rvu_get_pf(pcifunc) * LMT_MAX_VFS) + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; } @@ -69,7 +83,7 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, mutex_lock(&rvu->rsrc_lock); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); - pf = rvu_get_pf(pcifunc) & 0x1F; + pf = rvu_get_pf(pcifunc) & RVU_PFVF_PF_MASK; val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index a3c1d82032f5..aa2ab987eb75 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -580,6 +580,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, u64 lmt_addr, val, tbl_base; int pf, vf, num_vfs, hw_vfs; void __iomem *lmt_map_base; + int apr_pfs, apr_vfs; int buf_size = 10240; size_t off = 0; int index = 0; @@ -595,8 +596,12 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, return -ENOMEM; tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + val = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CFG); + apr_vfs = 1 << (val & 0xF); + apr_pfs = 1 << ((val >> 4) & 0x7); - lmt_map_base = ioremap_wc(tbl_base, 128 * 1024); + lmt_map_base = ioremap_wc(tbl_base, apr_pfs * apr_vfs * + LMT_MAPTBL_ENTRY_SIZE); if (!lmt_map_base) { dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); kfree(buf); @@ -618,7 +623,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t", pf); - index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE; + index = pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE; off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t", (tbl_base + index)); lmt_addr = readq(lmt_map_base + index); @@ -631,7 +636,7 @@ static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp, /* Reading num of VFs per PF */ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs); for (vf = 0; vf < num_vfs; vf++) { - index = (pf * rvu->hw->total_vfs * 16) + + index = (pf * apr_vfs * LMT_MAPTBL_ENTRY_SIZE) + ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE); off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d:VF%d \t\t", pf, vf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index b9a4efb95533..32fa8f2c5f4e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -217,7 +217,7 @@ static void rvu_nix_unregister_interrupts(struct rvu *rvu) rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false; } - for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++) + for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++) if (rvu->irq_allocated[offs + i]) { free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl); rvu->irq_allocated[offs + i] = false; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 
59d8d1ba15c2..7417087b6db5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -107,12 +107,13 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) } #define NPA_MAX_BURST 16 -void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) +int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) { struct otx2_nic *pfvf = dev; + int cnt = cq->pool_ptrs; u64 ptrs[NPA_MAX_BURST]; - int num_ptrs = 1; dma_addr_t bufptr; + int num_ptrs = 1; /* Refill pool with new buffers */ while (cq->pool_ptrs) { @@ -131,6 +132,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) num_ptrs = 1; } } + return cnt - cq->pool_ptrs; } void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) @@ -201,6 +203,11 @@ int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf) rsp = (struct nix_bandprof_alloc_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + rc = PTR_ERR(rsp); + goto out; + } + if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) { rc = -EIO; goto out; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index 8ae96815865e..c1861f7de254 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -24,7 +24,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu) return weight; } -void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); +int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); int cn10k_lmtst_init(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index a487a98eac88..6da8d8f2a870 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -428,7 +428,8 @@ static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf, if (sw_tx_sc->encrypt) sectag_tci |= (MCS_TCI_E | MCS_TCI_C); - policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu); + policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, + pfvf->netdev->mtu + OTX2_ETH_HLEN); /* Write SecTag excluding AN bits(1..0) */ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2); policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index c00d6d67db51..4fc73ac721b0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -513,11 +513,32 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx) (pfvf->hw.cq_ecount_wait - 1)); } +static int otx2_alloc_pool_buf(struct otx2_nic *pfvf, struct otx2_pool *pool, + dma_addr_t *dma) +{ + unsigned int offset = 0; + struct page *page; + size_t sz; + + sz = SKB_DATA_ALIGN(pool->rbsize); + sz = ALIGN(sz, OTX2_ALIGN); + + page = page_pool_alloc_frag(pool->page_pool, &offset, sz, GFP_ATOMIC); + if (unlikely(!page)) + return -ENOMEM; + + *dma = page_pool_get_dma_addr(page) + offset; + return 0; +} + static int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, dma_addr_t *dma) { u8 *buf; + if (pool->page_pool) + return otx2_alloc_pool_buf(pfvf, pool, dma); + buf 
= napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN); if (unlikely(!buf)) return -ENOMEM; @@ -546,20 +567,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, dma_addr_t *dma) { - if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) { - struct refill_work *work; - struct delayed_work *dwork; - - work = &pfvf->refill_wrk[cq->cq_idx]; - dwork = &work->pool_refill_work; - /* Schedule a task if no other task is running */ - if (!cq->refill_task_sched) { - cq->refill_task_sched = true; - schedule_delayed_work(dwork, - msecs_to_jiffies(100)); - } + if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) return -ENOMEM; - } return 0; } @@ -967,6 +976,7 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) int err, pool_id, non_xdp_queues; struct nix_aq_enq_req *aq; struct otx2_cq_queue *cq; + struct otx2_pool *pool; cq = &qset->cq[qidx]; cq->cq_idx = qidx; @@ -975,8 +985,13 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) cq->cq_type = CQ_RX; cq->cint_idx = qidx; cq->cqe_cnt = qset->rqe_cnt; - if (pfvf->xdp_prog) + if (pfvf->xdp_prog) { + pool = &qset->pool[qidx]; xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0); + xdp_rxq_info_reg_mem_model(&cq->xdp_rxq, + MEM_TYPE_PAGE_POOL, + pool->page_pool); + } } else if (qidx < non_xdp_queues) { cq->cq_type = CQ_TX; cq->cint_idx = qidx - pfvf->hw.rx_queues; @@ -1054,39 +1069,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx) static void otx2_pool_refill_task(struct work_struct *work) { struct otx2_cq_queue *cq; - struct otx2_pool *rbpool; struct refill_work *wrk; - int qidx, free_ptrs = 0; struct otx2_nic *pfvf; - dma_addr_t bufptr; + int qidx; wrk = container_of(work, struct refill_work, pool_refill_work.work); pfvf = wrk->pf; qidx = wrk - pfvf->refill_wrk; cq = &pfvf->qset.cq[qidx]; - rbpool = cq->rbpool; - free_ptrs = cq->pool_ptrs; - while (cq->pool_ptrs) { - if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) { - /* Schedule a WQ if we fails to free atleast half of the - * pointers else enable napi for this RQ. 
- */ - if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) { - struct delayed_work *dwork; - - dwork = &wrk->pool_refill_work; - schedule_delayed_work(dwork, - msecs_to_jiffies(100)); - } else { - cq->refill_task_sched = false; - } - return; - } - pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM); - cq->pool_ptrs--; - } cq->refill_task_sched = false; + + local_bh_disable(); + napi_schedule(wrk->napi); + local_bh_enable(); } int otx2_config_nix_queues(struct otx2_nic *pfvf) @@ -1206,10 +1202,31 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf) } } +void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, + u64 iova, int size) +{ + struct page *page; + u64 pa; + + pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); + page = virt_to_head_page(phys_to_virt(pa)); + + if (pool->page_pool) { + page_pool_put_full_page(pool->page_pool, page, true); + } else { + dma_unmap_page_attrs(pfvf->dev, iova, size, + DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + + put_page(page); + } +} + void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) { int pool_id, pool_start = 0, pool_end = 0, size = 0; - u64 iova, pa; + struct otx2_pool *pool; + u64 iova; if (type == AURA_NIX_SQ) { pool_start = otx2_get_pool_idx(pfvf, type, 0); @@ -1225,15 +1242,13 @@ void otx2_free_aura_ptr(struct otx2_nic *pfvf, int type) /* Free SQB and RQB pointers from the aura pool */ for (pool_id = pool_start; pool_id < pool_end; pool_id++) { iova = otx2_aura_allocptr(pfvf, pool_id); + pool = &pfvf->qset.pool[pool_id]; while (iova) { if (type == AURA_NIX_RQ) iova -= OTX2_HEAD_ROOM; - pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); - dma_unmap_page_attrs(pfvf->dev, iova, size, - DMA_FROM_DEVICE, - DMA_ATTR_SKIP_CPU_SYNC); - put_page(virt_to_page(phys_to_virt(pa))); + otx2_free_bufs(pfvf, pool, iova, size); + iova = otx2_aura_allocptr(pfvf, pool_id); } } @@ -1251,6 +1266,8 @@ void otx2_aura_pool_free(struct otx2_nic *pfvf) pool = &pfvf->qset.pool[pool_id]; qmem_free(pfvf->dev, pool->stack); qmem_free(pfvf->dev, pool->fc_addr); + page_pool_destroy(pool->page_pool); + pool->page_pool = NULL; } devm_kfree(pfvf->dev, pfvf->qset.pool); pfvf->qset.pool = NULL; @@ -1334,8 +1351,9 @@ int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, } int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, - int stack_pages, int numptrs, int buf_size) + int stack_pages, int numptrs, int buf_size, int type) { + struct page_pool_params pp_params = { 0 }; struct npa_aq_enq_req *aq; struct otx2_pool *pool; int err; @@ -1379,6 +1397,23 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, aq->ctype = NPA_AQ_CTYPE_POOL; aq->op = NPA_AQ_INSTOP_INIT; + if (type != AURA_NIX_RQ) { + pool->page_pool = NULL; + return 0; + } + + pp_params.order = get_order(buf_size); + pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP; + pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs); + pp_params.nid = NUMA_NO_NODE; + pp_params.dev = pfvf->dev; + pp_params.dma_dir = DMA_FROM_DEVICE; + pool->page_pool = page_pool_create(&pp_params); + if (IS_ERR(pool->page_pool)) { + netdev_err(pfvf->netdev, "Creation of page pool failed\n"); + return PTR_ERR(pool->page_pool); + } + return 0; } @@ -1413,7 +1448,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf) /* Initialize pool context */ err = otx2_pool_init(pfvf, pool_id, stack_pages, - num_sqbs, hw->sqb_size); + num_sqbs, hw->sqb_size, AURA_NIX_SQ); if (err) goto fail; } @@ -1476,7 +1511,7 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf) } for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) { err = 
otx2_pool_init(pfvf, pool_id, stack_pages, - num_ptrs, pfvf->rbsize); + num_ptrs, pfvf->rbsize, AURA_NIX_RQ); if (err) goto fail; } @@ -1660,7 +1695,6 @@ int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable) req->bpid_per_chan = 0; #endif - return otx2_sync_mbox_msg(&pfvf->mbox); } EXPORT_SYMBOL(otx2_nix_config_bp); @@ -1786,6 +1820,10 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf) if (!rc) { rsp = (struct nix_hw_info *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + rc = PTR_ERR(rsp); + goto out; + } /* HW counts VLAN insertion bytes (8 for double tag) * irrespective of whether SQE is requesting to insert VLAN diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index c15d1864a637..1d3c4180d341 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -280,6 +280,7 @@ struct flr_work { struct refill_work { struct delayed_work pool_refill_work; struct otx2_nic *pf; + struct napi_struct *napi; }; /* PTPv2 originTimestamp structure */ @@ -347,7 +348,7 @@ struct dev_hw_ops { int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura); void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq, int size, int qidx); - void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); + int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq); void (*aura_freeptr)(void *dev, int aura, u64 buf); }; @@ -934,7 +935,7 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool, int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable); void otx2_ctx_disable(struct mbox *mbox, int type, bool npa); int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable); -void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); +void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx); void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq); int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura); int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); @@ -942,7 +943,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, dma_addr_t *dma); int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id, - int stack_pages, int numptrs, int buf_size); + int stack_pages, int numptrs, int buf_size, int type); int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, int pool_id, int numptrs); @@ -1012,6 +1013,8 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf); int otx2_handle_ntuple_tc_features(struct net_device *netdev, netdev_features_t features); int otx2_smq_flush(struct otx2_nic *pfvf, int smq); +void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, + u64 iova, int size); /* tc support */ int otx2_init_tc(struct otx2_nic *nic); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c index 28fb643d2917..1a13f003f3db 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c @@ -311,6 +311,11 @@ int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf) if (!otx2_sync_mbox_msg(&pfvf->mbox)) { rsp = (struct cgx_pfc_rsp *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); + goto unlock; + } + if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) { dev_warn(pfvf->dev, "Failed to config PFC\n"); 
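
The octeontx2 hunks above all add the same guard after otx2_mbox_get_rsp(): the returned pointer may encode an errno rather than point at a valid response, so it has to be tested with IS_ERR() and converted with PTR_ERR() before any field is read (unlocking the mailbox mutex first where it is held). The stand-alone sketch below models only that convention; err_ptr(), is_err() and ptr_err() are simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() macros, and struct reply is hypothetical.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
static void *err_ptr(long error)      { return (void *)(intptr_t)error; }
static int is_err(const void *ptr)    { return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO; }
static long ptr_err(const void *ptr)  { return (long)(intptr_t)ptr; }

struct reply { int index; };          /* hypothetical mailbox response */

/* Model of a response lookup that can fail: it returns either a valid
 * reply or an errno encoded in the pointer value itself. */
static struct reply *get_response(int fail)
{
        static struct reply ok = { .index = 7 };

        return fail ? err_ptr(-EIO) : &ok;
}

static int consume(int fail)
{
        struct reply *rsp = get_response(fail);

        if (is_err(rsp))                  /* the guard the hunks add */
                return (int)ptr_err(rsp); /* propagate -EIO and friends */

        return rsp->index;                /* only dereference valid pointers */
}

int main(void)
{
        printf("ok path:   %d\n", consume(0));  /* prints 7 */
        printf("fail path: %d\n", consume(1));  /* prints -5 (-EIO) */
        return 0;
}
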
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c index 80d853b343f9..2046dd0da00d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -28,6 +28,11 @@ static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, if (!err) { rsp = (struct cgx_mac_addr_add_rsp *) otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + mutex_unlock(&pf->mbox.lock); + return PTR_ERR(rsp); + } + *dmac_index = rsp->index; } @@ -200,6 +205,10 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u32 bit_pos) rsp = (struct cgx_mac_addr_update_rsp *) otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + rc = PTR_ERR(rsp); + goto out; + } pf->flow_cfg->bmap_to_dmacindex[bit_pos] = rsp->index; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index af779ae40d3c..2d6f6edb1510 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -333,6 +333,11 @@ static void otx2_get_pauseparam(struct net_device *netdev, if (!otx2_sync_mbox_msg(&pfvf->mbox)) { rsp = (struct cgx_pause_frm_cfg *) otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + mutex_unlock(&pfvf->mbox.lock); + return; + } + pause->rx_pause = rsp->rx_pause; pause->tx_pause = rsp->tx_pause; } @@ -1072,6 +1077,11 @@ static int otx2_set_fecparam(struct net_device *netdev, rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); + goto end; + } + if (rsp->fec >= 0) pfvf->linfo.fec = rsp->fec; else diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 5c757508322b..7c7f8814fb3f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -121,6 +121,8 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count) rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) + goto exit; for (ent = 0; ent < rsp->count; ent++) flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent]; @@ -199,6 +201,10 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + mutex_unlock(&pfvf->mbox.lock); + return PTR_ERR(rsp); + } if (rsp->count != req->count) { netdev_info(pfvf->netdev, @@ -234,6 +240,10 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) frsp = (struct npc_get_field_status_rsp *)otx2_mbox_get_rsp (&pfvf->mbox.mbox, 0, &freq->hdr); + if (IS_ERR(frsp)) { + mutex_unlock(&pfvf->mbox.lock); + return PTR_ERR(frsp); + } if (frsp->enable) { pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 6b7fb324e756..c29cb56caf08 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1591,7 +1591,9 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) struct nix_lf_free_req *free_req; struct mbox *mbox = &pf->mbox; struct otx2_cq_queue *cq; + struct otx2_pool *pool; struct msg_req *req; + int pool_id; int qidx; /* Ensure 
all SQE are processed */ @@ -1618,7 +1620,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) for (qidx = 0; qidx < qset->cq_cnt; qidx++) { cq = &qset->cq[qidx]; if (cq->cq_type == CQ_RX) - otx2_cleanup_rx_cqes(pf, cq); + otx2_cleanup_rx_cqes(pf, cq, qidx); else otx2_cleanup_tx_cqes(pf, cq); } @@ -1629,6 +1631,13 @@ static void otx2_free_hw_resources(struct otx2_nic *pf) /* Free RQ buffer pointers*/ otx2_free_aura_ptr(pf, AURA_NIX_RQ); + for (qidx = 0; qidx < pf->hw.rx_queues; qidx++) { + pool_id = otx2_get_pool_idx(pf, AURA_NIX_RQ, qidx); + pool = &pf->qset.pool[pool_id]; + page_pool_destroy(pool->page_pool); + pool->page_pool = NULL; + } + otx2_free_cq_res(pf); /* Free all ingress bandwidth profiles allocated */ @@ -1996,6 +2005,10 @@ int otx2_stop(struct net_device *netdev) netif_tx_disable(netdev); + for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) + cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); + devm_kfree(pf->dev, pf->refill_wrk); + otx2_free_hw_resources(pf); otx2_free_cints(pf, pf->hw.cint_cnt); otx2_disable_napi(pf); @@ -2003,9 +2016,6 @@ int otx2_stop(struct net_device *netdev) for (qidx = 0; qidx < netdev->num_tx_queues; qidx++) netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx)); - for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++) - cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work); - devm_kfree(pf->dev, pf->refill_wrk); kfree(qset->sq); kfree(qset->cq); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index e579183e5239..31caf7a7a3ad 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -218,9 +218,6 @@ static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, va - page_address(page) + off, len - off, pfvf->rbsize); - - otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM, - pfvf->rbsize, DMA_FROM_DEVICE); return true; } @@ -383,6 +380,8 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf, if (pfvf->netdev->features & NETIF_F_RXCSUM) skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_mark_for_recycle(skb); + napi_gro_frags(napi); } @@ -429,9 +428,10 @@ process_cqe: return processed_cqe; } -void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) +int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) { struct otx2_nic *pfvf = dev; + int cnt = cq->pool_ptrs; dma_addr_t bufptr; while (cq->pool_ptrs) { @@ -440,6 +440,8 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM); cq->pool_ptrs--; } + + return cnt - cq->pool_ptrs; } static int otx2_tx_napi_handler(struct otx2_nic *pfvf, @@ -533,6 +535,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) struct otx2_cq_queue *cq; struct otx2_qset *qset; struct otx2_nic *pfvf; + int filled_cnt = -1; cq_poll = container_of(napi, struct otx2_cq_poll, napi); pfvf = (struct otx2_nic *)cq_poll->dev; @@ -553,7 +556,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) } if (rx_cq && rx_cq->pool_ptrs) - pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); + filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq); /* Clear the IRQ */ otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0)); @@ -566,9 +569,25 @@ int otx2_napi_handler(struct napi_struct *napi, int budget) if (pfvf->flags & OTX2_FLAG_ADPTV_INT_COAL_ENABLED) otx2_adjust_adaptive_coalese(pfvf, cq_poll); - /* Re-enable 
interrupts */ - otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), - BIT_ULL(0)); + if (unlikely(!filled_cnt)) { + struct refill_work *work; + struct delayed_work *dwork; + + work = &pfvf->refill_wrk[cq->cq_idx]; + dwork = &work->pool_refill_work; + /* Schedule a task if no other task is running */ + if (!cq->refill_task_sched) { + work->napi = napi; + cq->refill_task_sched = true; + schedule_delayed_work(dwork, + msecs_to_jiffies(100)); + } + } else { + /* Re-enable interrupts */ + otx2_write64(pfvf, + NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), + BIT_ULL(0)); + } } return workdone; } @@ -1191,11 +1210,13 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, } EXPORT_SYMBOL(otx2_sq_append_skb); -void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) +void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int qidx) { struct nix_cqe_rx_s *cqe; + struct otx2_pool *pool; int processed_cqe = 0; - u64 iova, pa; + u16 pool_id; + u64 iova; if (pfvf->xdp_prog) xdp_rxq_info_unreg(&cq->xdp_rxq); @@ -1203,6 +1224,9 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe) return; + pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_RQ, qidx); + pool = &pfvf->qset.pool[pool_id]; + while (cq->pend_cqe) { cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq); processed_cqe++; @@ -1215,9 +1239,8 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq) continue; } iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM; - pa = otx2_iova_to_phys(pfvf->iommu_domain, iova); - otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize, DMA_FROM_DEVICE); - put_page(virt_to_page(phys_to_virt(pa))); + + otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize); } /* Free CQEs to HW */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 7ab6db9a986f..a82ffca8ce1b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -23,6 +23,8 @@ #define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN) #define OTX2_MIN_MTU 60 +#define OTX2_PAGE_POOL_SZ 2048 + #define OTX2_MAX_GSO_SEGS 255 #define OTX2_MAX_FRAGS_IN_SQE 9 @@ -118,6 +120,7 @@ struct otx2_cq_poll { struct otx2_pool { struct qmem *stack; struct qmem *fc_addr; + struct page_pool *page_pool; u16 rbsize; }; @@ -167,6 +170,6 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); -void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); -void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); +int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); +int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); #endif /* OTX2_TXRX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c index e142d43f5a62..95a2c8e616bd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c @@ -63,7 +63,7 @@ static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx) /* Initialize pool context */ err = otx2_pool_init(pfvf, pool_id, stack_pages, - num_sqbs, hw->sqb_size); + num_sqbs, hw->sqb_size, AURA_NIX_SQ); if (err) goto aura_free; diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 
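
The octeontx2 changes above rework RX buffer refill: the refill_pool_ptrs() handlers now report how many pointers they actually returned to the aura, and the NAPI handler falls back to the delayed refill work only when that count is zero, re-arming the completion interrupt otherwise (the worker then simply re-schedules NAPI instead of touching the pool itself). The stand-alone model below illustrates only that accounting and decision; struct cq here and alloc_buffer() are placeholders, not driver code.

#include <stdbool.h>
#include <stdio.h>

struct cq { int pool_ptrs; };            /* placeholder, not the driver's struct */

static bool alloc_buffer(int budget_left)
{
        return budget_left > 0;          /* stand-in for a buffer allocation that may fail */
}

static int refill_pool_ptrs(struct cq *cq, int avail_bufs)
{
        int cnt = cq->pool_ptrs;

        while (cq->pool_ptrs && alloc_buffer(avail_bufs--))
                cq->pool_ptrs--;

        return cnt - cq->pool_ptrs;      /* how many were actually refilled */
}

static void napi_poll(struct cq *cq, int avail_bufs)
{
        int filled = refill_pool_ptrs(cq, avail_bufs);

        if (!filled)
                printf("nothing refilled: schedule delayed refill work\n");
        else
                printf("refilled %d: re-enable the completion interrupt\n", filled);
}

int main(void)
{
        struct cq cq = { .pool_ptrs = 4 };

        napi_poll(&cq, 4);               /* allocations succeed -> refilled 4 */
        cq.pool_ptrs = 4;
        napi_poll(&cq, 0);               /* allocations fail    -> defer to worker */
        return 0;
}
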
cf456d62677f..1e8568a96443 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1394,18 +1394,15 @@ static int pxa168_eth_probe(struct platform_device *pdev) printk(KERN_NOTICE "PXA168 10/100 Ethernet Driver\n"); - clk = devm_clk_get(&pdev->dev, NULL); + clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) { - dev_err(&pdev->dev, "Fast Ethernet failed to get clock\n"); + dev_err(&pdev->dev, "Fast Ethernet failed to get and enable clock\n"); return -ENODEV; } - clk_prepare_enable(clk); dev = alloc_etherdev(sizeof(struct pxa168_eth_private)); - if (!dev) { - err = -ENOMEM; - goto err_clk; - } + if (!dev) + return -ENOMEM; platform_set_drvdata(pdev, dev); pep = netdev_priv(dev); @@ -1523,8 +1520,6 @@ err_free_mdio: mdiobus_free(pep->smi_bus); err_netdev: free_netdev(dev); -err_clk: - clk_disable_unprepare(clk); return err; } @@ -1542,7 +1537,6 @@ static int pxa168_eth_remove(struct platform_device *pdev) if (dev->phydev) phy_disconnect(dev->phydev); - clk_disable_unprepare(pep->clk); mdiobus_unregister(pep->smi_bus); mdiobus_free(pep->smi_bus); unregister_netdev(dev); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index ab33ba1c3023..31bcced59f2f 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -130,6 +130,7 @@ static const struct pci_device_id sky2_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */ diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index 6a72687d5b83..8cb8d47227f5 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -34,8 +34,10 @@ struct mtk_flow_data { u16 vlan_in; struct { - u16 id; - __be16 proto; + struct { + u16 id; + __be16 proto; + } vlans[2]; u8 num; } vlan; struct { @@ -321,18 +323,19 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) case FLOW_ACTION_CSUM: break; case FLOW_ACTION_VLAN_PUSH: - if (data.vlan.num == 1 || + if (data.vlan.num + data.pppoe.num == 2 || act->vlan.proto != htons(ETH_P_8021Q)) return -EOPNOTSUPP; - data.vlan.id = act->vlan.vid; - data.vlan.proto = act->vlan.proto; + data.vlan.vlans[data.vlan.num].id = act->vlan.vid; + data.vlan.vlans[data.vlan.num].proto = act->vlan.proto; data.vlan.num++; break; case FLOW_ACTION_VLAN_POP: break; case FLOW_ACTION_PPPOE_PUSH: - if (data.pppoe.num == 1) + if (data.pppoe.num == 1 || + data.vlan.num == 2) return -EOPNOTSUPP; data.pppoe.sid = act->pppoe.sid; @@ -422,12 +425,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) foe.bridge.vlan = data.vlan_in; - if (data.vlan.num == 1) { - if (data.vlan.proto != htons(ETH_P_8021Q)) - return -EOPNOTSUPP; + for (i = 0; i < data.vlan.num; i++) + mtk_foe_entry_set_vlan(eth, &foe, data.vlan.vlans[i].id); - mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id); - } if (data.pppoe.num == 1) mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid); diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c 
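
The pxa168_eth hunk above switches to devm_clk_get_enabled(), which acquires and enables the clock in one call and registers the disable/unprepare and release with devres, so neither the probe error path nor remove needs an explicit clk_disable_unprepare() any more. A minimal probe sketch assuming that helper (my_driver_probe is a placeholder, not the driver's function):

#include <linux/clk.h>
#include <linux/platform_device.h>

/* Hedged sketch, not from the patch: once devm_clk_get_enabled() succeeds
 * there is nothing to undo by hand; the clock is disabled, unprepared and
 * put automatically when probe fails later or the device is unbound. */
static int my_driver_probe(struct platform_device *pdev)
{
        struct clk *clk;

        clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                     "failed to get and enable clock\n");

        /* ... rest of probe; no clk_disable_unprepare() on any error path ... */
        return 0;
}
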
b/drivers/net/ethernet/mediatek/mtk_star_emac.c index ad27749c0931..c42e9f741f95 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1164,6 +1164,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget) struct net_device *ndev = priv->ndev; unsigned int head = ring->head; unsigned int entry = ring->tail; + unsigned long flags; while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) { ret = mtk_star_tx_complete_one(priv); @@ -1183,9 +1184,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget) netif_wake_queue(ndev); if (napi_complete(napi)) { - spin_lock(&priv->lock); + spin_lock_irqsave(&priv->lock, flags); mtk_star_enable_dma_irq(priv, false, true); - spin_unlock(&priv->lock); + spin_unlock_irqrestore(&priv->lock, flags); } return 0; @@ -1342,16 +1343,16 @@ push_new_skb: static int mtk_star_rx_poll(struct napi_struct *napi, int budget) { struct mtk_star_priv *priv; + unsigned long flags; int work_done = 0; priv = container_of(napi, struct mtk_star_priv, rx_napi); work_done = mtk_star_rx(priv, budget); - if (work_done < budget) { - napi_complete_done(napi, work_done); - spin_lock(&priv->lock); + if (work_done < budget && napi_complete_done(napi, work_done)) { + spin_lock_irqsave(&priv->lock, flags); mtk_star_enable_dma_irq(priv, true, false); - spin_unlock(&priv->lock); + spin_unlock_irqrestore(&priv->lock, flags); } return work_done; diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index b330020dc0d6..f2bded847e61 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -682,9 +682,9 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) } static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, - struct mlx4_db *db, int order) + struct mlx4_db *db, unsigned int order) { - int o; + unsigned int o; int i; for (o = order; o <= 1; ++o) { @@ -712,7 +712,7 @@ found: return 0; } -int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_db_pgdir *pgdir; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 7fccf1a79f09..95eae224bbb4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -446,6 +446,8 @@ int mlx4_en_process_tx_cq(struct net_device *dev, if (unlikely(!priv->port_up)) return 0; + if (unlikely(!napi_budget) && cq->type == TX_XDP) + return 0; netdev_txq_bql_complete_prefetchw(ring->tx_queue); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 4a1eb6cd699c..6dbb4021fd2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1003,6 +1003,7 @@ static void cmd_work_handler(struct work_struct *work) complete(&ent->done); } up(&cmd->vars.sem); + complete(&ent->slotted); return; } } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index be7302aa6f86..33cc53f221e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -390,7 +390,7 @@ u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { u32 resrv_size = 
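
The mtk_star_emac hunks above change both poll routines in two related ways: the interrupt mask is only re-armed when napi_complete_done() reports that NAPI really completed, and the lock protecting the mask register is taken with spin_lock_irqsave() because the same lock is also taken from hard-IRQ context. A hedged sketch of that shape (my_priv, my_rx and my_enable_rx_irq are placeholders):

#include <linux/netdevice.h>
#include <linux/spinlock.h>

struct my_priv {                         /* placeholder private data */
        spinlock_t lock;                 /* also taken by the IRQ handler */
        struct napi_struct rx_napi;
};

static void my_enable_rx_irq(struct my_priv *priv) { /* write the IRQ mask */ }
static int my_rx(struct my_priv *priv, int budget)  { return 0; }

static int my_rx_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, rx_napi);
        int work_done = my_rx(priv, budget);
        unsigned long flags;

        /* Only re-arm the interrupt if NAPI actually completed. */
        if (work_done < budget && napi_complete_done(napi, work_done)) {
                spin_lock_irqsave(&priv->lock, flags);
                my_enable_rx_irq(priv);
                spin_unlock_irqrestore(&priv->lock, flags);
        }

        return work_done;
}
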
BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * - PAGE_SIZE; + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu)); } @@ -818,7 +818,8 @@ static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk) { - int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); @@ -1027,7 +1028,8 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_rq_param *rq_param) { - int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE; + int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * + MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE; u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL)); int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params)); u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index ce85b48d327d..6748c92941b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -48,15 +48,10 @@ mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw) struct list_head *iter; netdev_for_each_lower_dev(dev, lower, iter) { - struct mlx5_core_dev *mdev; - struct mlx5e_priv *priv; - if (!mlx5e_eswitch_rep(lower)) continue; - priv = netdev_priv(lower); - mdev = priv->mdev; - if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw)) + if (mlx5_esw_bridge_dev_same_esw(lower, esw)) return lower; } @@ -121,7 +116,7 @@ static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device * priv = netdev_priv(rep); mdev = priv->mdev; if (netif_is_lag_master(dev)) - return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev); + return mlx5_lag_is_master(mdev); return true; } @@ -436,6 +431,9 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb, if (!rep) return NOTIFY_DONE; + if (netif_is_lag_master(dev) && !mlx5_lag_is_shared_fdb(esw->dev)) + return NOTIFY_DONE; + switch (event) { case SWITCHDEV_FDB_ADD_TO_BRIDGE: fdb_info = container_of(info, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index f01f7dfdbcf8..b011e0d2b620 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -862,7 +862,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, return 0; err_rule: - mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh); + mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, attr, zone_rule->mh); mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id); err_mod_hdr: kfree(attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 907ad6ffe727..407556334495 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -4,6 +4,7 @@ #include #include #include 
"tc_tun_encap.h" +#include "fs_core.h" #include "en_tc.h" #include "tc_tun.h" #include "rep/tc.h" @@ -23,10 +24,18 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv, route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex); - if (!route_dev || !netif_is_ovs_master(route_dev) || - attr->parse_attr->filter_dev == e->out_dev) + if (!route_dev || !netif_is_ovs_master(route_dev)) goto out; + if (priv->mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS && + mlx5e_eswitch_uplink_rep(attr->parse_attr->filter_dev) && + (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) { + mlx5_core_warn(priv->mdev, + "Matching on external port with encap + fwd to table actions is not allowed for firmware steering\n"); + err = -EINVAL; + goto out; + } + err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex, MLX5E_TC_INT_PORT_EGRESS, &attr->action, out_index); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 2e0335246967..6d56d4a9977b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -665,7 +665,7 @@ tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, while (remaining > 0) { skb_frag_t *frag = &record->frags[i]; - get_page(skb_frag_page(frag)); + page_ref_inc(skb_frag_page(frag)); remaining -= skb_frag_size(frag); info->frags[i++] = *frag; } @@ -768,7 +768,7 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, stats = sq->stats; mlx5e_tx_dma_unmap(sq->pdev, dma); - put_page(wi->resync_dump_frag_page); + page_ref_dec(wi->resync_dump_frag_page); stats->tls_dump_packets++; stats->tls_dump_bytes += wi->num_bytes; } @@ -821,12 +821,12 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, err_out: for (; i < info.nr_frags; i++) - /* The put_page() here undoes the page ref obtained in tx_sync_info_get(). + /* The page_ref_dec() here undoes the page ref obtained in tx_sync_info_get(). * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be * released only upon their completions (or in mlx5e_free_txqsq_descs, * if channel closes). */ - put_page(skb_frag_page(&info.frags[i])); + page_ref_dec(skb_frag_page(&info.frags[i])); return MLX5E_KTLS_SYNC_FAIL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 385904502a6b..887d44635400 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4023,6 +4023,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n"); + features &= ~NETIF_F_HW_MACSEC; + if (netdev->features & NETIF_F_HW_MACSEC) + netdev_warn(netdev, "Disabling HW MACsec offload, not supported in switchdev mode\n"); + return features; } @@ -4791,11 +4795,9 @@ static int mlx5e_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; u8 mode, setting; - int err; - err = mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting); - if (err) - return err; + if (mlx5_eswitch_get_vepa(mdev->priv.eswitch, &setting)) + return -EOPNOTSUPP; mode = setting ? 
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB; return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, @@ -5980,7 +5982,9 @@ static void mlx5e_remove(struct auxiliary_device *adev) mlx5e_dcbnl_delete_app(priv); unregister_netdev(priv->netdev); mlx5e_suspend(adev, state); - priv->profile->cleanup(priv); + /* Avoid cleanup if profile rollback failed. */ + if (priv->profile) + priv->profile->cleanup(priv); mlx5e_devlink_port_unregister(priv); mlx5e_destroy_netdev(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 5aeca9534f15..b4980245b50b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -61,6 +61,7 @@ #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1 +#define MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE 0x8 static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; @@ -705,6 +706,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev) /* RQ */ mlx5e_build_rq_params(mdev, params); + if (!mlx5e_is_uplink_rep(priv) && mlx5_core_is_ecpf(mdev)) + params->log_rq_mtu_frames = MLX5E_REP_PARAMS_DEF_LOG_RQ_SIZE; /* CQ moderation params */ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); @@ -726,6 +729,8 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev, netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; netdev->watchdog_timeo = 15 * HZ; + if (mlx5_core_is_ecpf(mdev)) + netdev->tx_queue_len = 1 << MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE; #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) netdev->hw_features |= NETIF_F_HW_TC; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 08a75654f5f1..c170503b3aac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c @@ -165,6 +165,9 @@ mlx5e_test_loopback_validate(struct sk_buff *skb, struct udphdr *udph; struct iphdr *iph; + if (skb_linearize(skb)) + goto out; + /* We are only going to peek, no need to clone the SKB */ if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb)) goto out; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 433cdd0a2cf3..5237abbdcda1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -3320,7 +3320,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) int err; mutex_init(&esw->offloads.termtbl_mutex); - mlx5_rdma_enable_roce(esw->dev); + err = mlx5_rdma_enable_roce(esw->dev); + if (err) + goto err_roce; err = mlx5_esw_host_number_init(esw); if (err) @@ -3378,6 +3380,7 @@ err_vport_metadata: esw_offloads_metadata_uninit(esw); err_metadata: mlx5_rdma_disable_roce(esw->dev); +err_roce: mutex_destroy(&esw->offloads.termtbl_mutex); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index 9459e56ee90a..6aa96d33c210 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -163,11 +163,16 @@ static int temp_warn(struct notifier_block *nb, unsigned long type, void *data) u64 value_msb; value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb); + /* bit 1-63 are not supported for NICs, + * hence read only bit 0 (asic) from lsb. 
+ */ + value_lsb &= 0x1; value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb); - mlx5_core_warn(events->dev, - "High temperature on sensors with bit set %llx %llx", - value_msb, value_lsb); + if (net_ratelimit()) + mlx5_core_warn(events->dev, + "High temperature on sensors with bit set %llx %llx", + value_msb, value_lsb); return NOTIFY_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 164e10b5f9b7..2717450e9666 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1880,13 +1880,22 @@ lookup_fte_locked(struct mlx5_flow_group *g, fte_tmp = NULL; goto out; } - if (!fte_tmp->node.active) { - tree_put_node(&fte_tmp->node, false); - fte_tmp = NULL; - goto out; - } nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); + + if (!fte_tmp->node.active) { + up_write_ref_node(&fte_tmp->node, false); + + if (take_write) + up_write_ref_node(&g->node, false); + else + up_read_ref_node(&g->node); + + tree_put_node(&fte_tmp->node, false); + + return NULL; + } + out: if (take_write) up_write_ref_node(&g->node, false); @@ -2412,6 +2421,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, break; case MLX5_FLOW_NAMESPACE_RDMA_TX: root_ns = steering->rdma_tx_root_ns; + prio = RDMA_TX_BYPASS_PRIO; break; case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS: root_ns = steering->rdma_rx_root_ns; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 65483dab9057..b4faac12789d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -850,6 +850,7 @@ static void poll_health(struct timer_list *t) health->prev = count; if (health->miss_counter == MAX_MISSES) { mlx5_core_err(dev, "device's health compromised - reached miss count\n"); + health->synd = ioread8(&h->synd); print_health_info(dev); queue_work(health->wq, &health->report_work); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index 005661248c7e..9faa9ef863a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -540,7 +540,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, set_tt_map(port_sel, hash_type); err = mlx5_lag_create_definers(ldev, hash_type, ports); if (err) - return err; + goto clear_port_sel; if (port_sel->tunnel) { err = mlx5_lag_create_inner_ttc_table(ldev); @@ -559,6 +559,8 @@ destroy_inner: mlx5_destroy_ttc_table(port_sel->inner.ttc); destroy_definers: mlx5_lag_destroy_definers(ldev); +clear_port_sel: + memset(port_sel, 0, sizeof(*port_sel)); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 2ac255bb918b..133e8220aaea 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -186,17 +186,16 @@ static void mlx5_pps_out(struct work_struct *work) } } -static void mlx5_timestamp_overflow(struct work_struct *work) +static long mlx5_timestamp_overflow(struct ptp_clock_info *ptp_info) { - struct delayed_work *dwork = to_delayed_work(work); struct mlx5_core_dev *mdev; struct mlx5_timer *timer; struct mlx5_clock *clock; unsigned long flags; - timer = container_of(dwork, struct mlx5_timer, overflow_work); - clock = container_of(timer, 
struct mlx5_clock, timer); + clock = container_of(ptp_info, struct mlx5_clock, ptp_info); mdev = container_of(clock, struct mlx5_core_dev, clock); + timer = &clock->timer; if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto out; @@ -207,7 +206,7 @@ static void mlx5_timestamp_overflow(struct work_struct *work) write_sequnlock_irqrestore(&clock->lock, flags); out: - schedule_delayed_work(&timer->overflow_work, timer->overflow_period); + return timer->overflow_period; } static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev, @@ -375,6 +374,7 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta) timer->nominal_c_mult + diff; mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); + ptp_schedule_worker(clock->ptp, timer->overflow_period); return 0; } @@ -708,6 +708,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = { .settime64 = mlx5_ptp_settime, .enable = NULL, .verify = NULL, + .do_aux_work = mlx5_timestamp_overflow, }; static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin, @@ -908,12 +909,11 @@ static void mlx5_init_overflow_period(struct mlx5_clock *clock) do_div(ns, NSEC_PER_SEC / HZ); timer->overflow_period = ns; - INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow); - if (timer->overflow_period) - schedule_delayed_work(&timer->overflow_work, 0); - else + if (!timer->overflow_period) { + timer->overflow_period = HZ; mlx5_core_warn(mdev, - "invalid overflow period, overflow_work is not scheduled\n"); + "invalid overflow period, overflow_work is scheduled once per second\n"); + } if (clock_info) clock_info->overflow_period = timer->overflow_period; @@ -999,6 +999,9 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT); mlx5_eq_notifier_register(mdev, &clock->pps_nb); + + if (clock->ptp) + ptp_schedule_worker(clock->ptp, 0); } void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) @@ -1015,7 +1018,6 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) } cancel_work_sync(&clock->pps_info.out_work); - cancel_delayed_work_sync(&clock->timer.overflow_work); if (mdev->clock_info) { free_page((unsigned long)mdev->clock_info); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index df58cba37930..64c1071bece8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -196,6 +196,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains, ns = mlx5_get_flow_namespace(chains->dev, chains->ns); } + if (!ns) { + mlx5_core_warn(chains->dev, "Failed to get flow namespace\n"); + return ERR_PTR(-EOPNOTSUPP); + } + ft_attr.autogroup.num_reserved_entries = 2; ft_attr.autogroup.max_num_groups = chains->group_num; ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index a6d3fc96e168..10b9dc2aaf06 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -513,7 +513,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name, pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ; pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ; mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d", - name, size, start); + name ? 
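
The lib/clock.c hunk above drops the self-rearming delayed work and instead runs the timecounter overflow check from the PTP clock's .do_aux_work callback, kicked off with ptp_schedule_worker(); the value returned by the callback is the delay in jiffies before its next invocation. A minimal sketch of that pattern (my_clock and my_overflow_check are placeholders, not mlx5 names):

#include <linux/container_of.h>
#include <linux/ptp_clock_kernel.h>

struct my_clock {                        /* placeholder driver state */
        struct ptp_clock *ptp;
        struct ptp_clock_info ptp_info;
        unsigned long overflow_period;   /* jiffies between checks */
};

/* Runs on the PTP aux worker; returning the period re-schedules it. */
static long my_overflow_check(struct ptp_clock_info *info)
{
        struct my_clock *clk = container_of(info, struct my_clock, ptp_info);

        /* ... read the cycle counter so the timecounter never wraps ... */
        return clk->overflow_period;
}

static const struct ptp_clock_info my_ptp_info = {
        .name        = "my-ptp",
        .do_aux_work = my_overflow_check,
};

/* After ptp_clock_register() succeeds, start the worker right away:
 *
 *      clk->ptp_info = my_ptp_info;
 *      clk->ptp = ptp_clock_register(&clk->ptp_info, dev);
 *      if (!IS_ERR_OR_NULL(clk->ptp))
 *              ptp_schedule_worker(clk->ptp, 0);
 */
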
name : "mlx5_pcif_pool", size, start); return pool; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c index 540cf05f6373..e61a4fa46d77 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c @@ -130,8 +130,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid * static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev) { + u8 mac[ETH_ALEN] = {}; union ib_gid gid; - u8 mac[ETH_ALEN]; mlx5_rdma_make_default_gid(dev, &gid); return mlx5_core_roce_gid_set(dev, 0, @@ -152,17 +152,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) mlx5_nic_vport_disable_roce(dev); } -void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) +int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { int err; if (!MLX5_CAP_GEN(dev, roce)) - return; + return 0; err = mlx5_nic_vport_enable_roce(dev); if (err) { mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err); - return; + return err; } err = mlx5_rdma_add_roce_addr(dev); @@ -177,10 +177,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) goto del_roce_addr; } - return; + return err; del_roce_addr: mlx5_rdma_del_roce_addr(dev); disable_roce: mlx5_nic_vport_disable_roce(dev); + return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h index 750cff2a71a4..3d9e76c3d42f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h @@ -8,12 +8,12 @@ #ifdef CONFIG_MLX5_ESWITCH -void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev); +int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev); void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev); #else /* CONFIG_MLX5_ESWITCH */ -static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {} +static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; } static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {} #endif /* CONFIG_MLX5_ESWITCH */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index fc6ae49b5ecc..d462017c6a95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -195,7 +195,9 @@ dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport) if (ret) { mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret); kvfree(vport_caps); - return ERR_PTR(ret); + if (ret == -EBUSY) + return ERR_PTR(-EBUSY); + return NULL; } return vport_caps; diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c index 46245e0b2462..43c84900369a 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c @@ -14,7 +14,6 @@ #define MLXFW_FSM_STATE_WAIT_TIMEOUT_MS 30000 #define MLXFW_FSM_STATE_WAIT_ROUNDS \ (MLXFW_FSM_STATE_WAIT_TIMEOUT_MS / MLXFW_FSM_STATE_WAIT_CYCLE_MS) -#define MLXFW_FSM_MAX_COMPONENT_SIZE (10 * (1 << 20)) static const int mlxfw_fsm_state_errno[] = { [MLXFW_FSM_STATE_ERR_ERROR] = -EIO, @@ -229,7 +228,6 @@ static int mlxfw_flash_component(struct mlxfw_dev *mlxfw_dev, return err; } - comp_max_size = min_t(u32, comp_max_size, MLXFW_FSM_MAX_COMPONENT_SIZE); if (comp->data_size > comp_max_size) { MLXFW_ERR_MSG(mlxfw_dev, extack, "Component size is bigger than limit", -EINVAL); diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index c8ff2a6d7e90..57ab91133774 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -970,6 +970,7 @@ enum mlxsw_sp_acl_profile { }; struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); +struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl); int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_flow_block *block, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 6c5af018546f..93b71106b4c5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -40,6 +40,11 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl) return acl->afk; } +struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl) +{ + return &acl->tcam; +} + struct mlxsw_sp_acl_ruleset_ht_key { struct mlxsw_sp_flow_block *block; u32 chain_index; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 685bcf8cbfa9..6796edb24951 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -747,13 +747,13 @@ static void mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region) { + struct mlxsw_sp_acl_tcam *tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl); const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; ops->region_fini(mlxsw_sp, region->priv); mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region); mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region); - mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, - region->id); + mlxsw_sp_acl_tcam_region_id_put(tcam, region->id); kfree(region); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index dcd79d7e2af4..ab4852e0925d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev, err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp); if (err) return; - mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); + err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl); + if (err) + return; for (i = 0; i < len; i++) { data[data_index + i] = hw_stats[i].getter(ppcnt_pl); if (!hw_stats[i].cells_bytes) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index b3472fb94617..8883ef012747 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev, parms = mlxsw_sp_ipip_netdev_parms4(to_dev); ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp, - 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0, - 0); + 0, 0, tun->net, parms.link, tun->fwmark, 0, 0); rt = ip_route_output_key(tun->net, &fl4); if (IS_ERR(rt)) diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index 0b2eaed11072..fd3555419179 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -1943,6 +1943,7 @@ static void 
lan743x_tx_frame_add_lso(struct lan743x_tx *tx, if (nr_frags <= 0) { tx->frame_data0 |= TX_DESC_DATA0_LS_; tx->frame_data0 |= TX_DESC_DATA0_IOC_; + tx->frame_last = tx->frame_first; } tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); @@ -2012,6 +2013,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx, tx->frame_first = 0; tx->frame_data0 = 0; tx->frame_tail = 0; + tx->frame_last = 0; return -ENOMEM; } @@ -2052,16 +2054,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx, TX_DESC_DATA0_DTYPE_DATA_) { tx->frame_data0 |= TX_DESC_DATA0_LS_; tx->frame_data0 |= TX_DESC_DATA0_IOC_; + tx->frame_last = tx->frame_tail; } - tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; - buffer_info = &tx->buffer_info[tx->frame_tail]; + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last]; + buffer_info = &tx->buffer_info[tx->frame_last]; buffer_info->skb = skb; if (time_stamp) buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED; if (ignore_sync) buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC; + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; tx_descriptor->data0 = cpu_to_le32(tx->frame_data0); tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); tx->last_tail = tx->frame_tail; @@ -3249,6 +3253,7 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, struct pci_dev *pdev) { struct lan743x_tx *tx; + u32 sgmii_ctl; int index; int ret; @@ -3261,6 +3266,15 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, spin_lock_init(&adapter->eth_syslock_spinlock); mutex_init(&adapter->sgmii_rw_lock); pci11x1x_set_rfe_rd_fifo_threshold(adapter); + sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); + if (adapter->is_sgmii_en) { + sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; + } else { + sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; + sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; + } + lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); } else { adapter->max_tx_channels = LAN743X_MAX_TX_CHANNELS; adapter->used_tx_channels = LAN743X_USED_TX_CHANNELS; @@ -3309,7 +3323,6 @@ static int lan743x_hardware_init(struct lan743x_adapter *adapter, static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) { - u32 sgmii_ctl; int ret; adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); @@ -3321,10 +3334,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) adapter->mdiobus->priv = (void *)adapter; if (adapter->is_pci11x1x) { if (adapter->is_sgmii_en) { - sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); - sgmii_ctl |= SGMII_CTL_SGMII_ENABLE_; - sgmii_ctl &= ~SGMII_CTL_SGMII_POWER_DN_; - lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); netif_dbg(adapter, drv, adapter->netdev, "SGMII operation\n"); adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45; @@ -3334,10 +3343,6 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus-c45\n"); } else { - sgmii_ctl = lan743x_csr_read(adapter, SGMII_CTL); - sgmii_ctl &= ~SGMII_CTL_SGMII_ENABLE_; - sgmii_ctl |= SGMII_CTL_SGMII_POWER_DN_; - lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl); netif_dbg(adapter, drv, adapter->netdev, "RGMII operation\n"); // Only C22 support when RGMII I/F diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h index 92a5660b8820..c0d209f36188 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.h +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -974,6 +974,7 @@ 
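
The lan743x hunks above add a frame_last index so that the skb and the timestamp/ignore-sync flags get attached to the buffer_info of the frame's actual last data descriptor, rather than to whatever frame_tail happens to point at by the time the frame is closed. The toy model below shows only that bookkeeping; it is not the driver's ring logic and all names are placeholders.

#include <stdio.h>

#define RING_SIZE 8

/* A multi-descriptor frame advances 'tail' once per descriptor; 'last'
 * remembers the index of the final data descriptor so per-frame state
 * can be attached there even after 'tail' has moved on. */
struct ring {
        unsigned int first, tail, last;
        const char *owner[RING_SIZE];    /* stand-in for buffer_info->skb */
};

static void frame_start(struct ring *r)
{
        r->first = r->tail;
}

static void frame_add_descriptor(struct ring *r)
{
        r->last = r->tail;                    /* last data descriptor so far */
        r->tail = (r->tail + 1) % RING_SIZE;  /* tail now points at the next free slot */
}

static void frame_end(struct ring *r, const char *skb)
{
        r->owner[r->last] = skb;              /* attach to the real last descriptor */
}

int main(void)
{
        struct ring r = { 0 };

        frame_start(&r);
        frame_add_descriptor(&r);
        frame_add_descriptor(&r);
        frame_end(&r, "skb0");

        printf("first=%u last=%u tail=%u owner[last]=%s\n",
               r.first, r.last, r.tail, r.owner[r.last]);
        return 0;
}
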
struct lan743x_tx { u32 frame_first; u32 frame_data0; u32 frame_tail; + u32 frame_last; struct lan743x_tx_buffer_info *buffer_info; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 7031f41287e0..1ed69e77b895 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -680,12 +680,11 @@ static int sparx5_start(struct sparx5 *sparx5) err = -ENXIO; if (sparx5->fdma_irq >= 0) { if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0) - err = devm_request_threaded_irq(sparx5->dev, - sparx5->fdma_irq, - NULL, - sparx5_fdma_handler, - IRQF_ONESHOT, - "sparx5-fdma", sparx5); + err = devm_request_irq(sparx5->dev, + sparx5->fdma_irq, + sparx5_fdma_handler, + 0, + "sparx5-fdma", sparx5); if (!err) err = sparx5_fdma_start(sparx5); if (err) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c index 212bf6f4ed72..e1df6bc86949 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c @@ -1113,7 +1113,7 @@ int sparx5_port_init(struct sparx5 *sparx5, spx5_inst_rmw(DEV10G_MAC_MAXLEN_CFG_MAX_LEN_SET(ETH_MAXLEN), DEV10G_MAC_MAXLEN_CFG_MAX_LEN, devinst, - DEV10G_MAC_ENA_CFG(0)); + DEV10G_MAC_MAXLEN_CFG(0)); /* Handle Signal Detect in 10G PCS */ spx5_inst_wr(PCS10G_BR_PCS_SD_CFG_SD_POL_SET(sd_pol) | diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index d674ebda2053..9e55679796d9 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -995,7 +995,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req, header->inline_oob_size_div4 = client_oob_size / sizeof(u32); if (oob_in_sgl) { - WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2); + WARN_ON_ONCE(wqe_req->num_sge < 2); header->client_oob_in_sgl = 1; diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 310a36356f56..203cb4978544 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -416,9 +416,158 @@ static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot, return VLAN_N_VID - bridge_num - 1; } +/** + * ocelot_update_vlan_reclassify_rule() - Make switch aware only to bridge VLAN TPID + * + * @ocelot: Switch private data structure + * @port: Index of ingress port + * + * IEEE 802.1Q-2018 clauses "5.5 C-VLAN component conformance" and "5.6 S-VLAN + * component conformance" suggest that a C-VLAN component should only recognize + * and filter on C-Tags, and an S-VLAN component should only recognize and + * process based on C-Tags. + * + * In Linux, as per commit 1a0b20b25732 ("Merge branch 'bridge-next'"), C-VLAN + * components are largely represented by a bridge with vlan_protocol 802.1Q, + * and S-VLAN components by a bridge with vlan_protocol 802.1ad. + * + * Currently the driver only offloads vlan_protocol 802.1Q, but the hardware + * design is non-conformant, because the switch assigns each frame to a VLAN + * based on an entirely different question, as detailed in figure "Basic VLAN + * Classification Flow" from its manual and reproduced below. 
+ * + * Set TAG_TYPE, PCP, DEI, VID to port-default values in VLAN_CFG register + * if VLAN_AWARE_ENA[port] and frame has outer tag then: + * if VLAN_INNER_TAG_ENA[port] and frame has inner tag then: + * TAG_TYPE = (Frame.InnerTPID <> 0x8100) + * Set PCP, DEI, VID to values from inner VLAN header + * else: + * TAG_TYPE = (Frame.OuterTPID <> 0x8100) + * Set PCP, DEI, VID to values from outer VLAN header + * if VID == 0 then: + * VID = VLAN_CFG.VLAN_VID + * + * Summarized, the switch will recognize both 802.1Q and 802.1ad TPIDs as VLAN + * "with equal rights", and just set the TAG_TYPE bit to 0 (if 802.1Q) or to 1 + * (if 802.1ad). It will classify based on whichever of the tags is "outer", no + * matter what TPID that may have (or "inner", if VLAN_INNER_TAG_ENA[port]). + * + * In the VLAN Table, the TAG_TYPE information is not accessible - just the + * classified VID is - so it is as if each VLAN Table entry is for 2 VLANs: + * C-VLAN X, and S-VLAN X. + * + * Whereas the Linux bridge behavior is to only filter on frames with a TPID + * equal to the vlan_protocol, and treat everything else as VLAN-untagged. + * + * Consider an ingress packet tagged with 802.1ad VID=3 and 802.1Q VID=5, + * received on a bridge vlan_filtering=1 vlan_protocol=802.1Q port. This frame + * should be treated as 802.1Q-untagged, and classified to the PVID of that + * bridge port. Not to VID=3, and not to VID=5. + * + * The VCAP IS1 TCAM has everything we need to overwrite the choices made in + * the basic VLAN classification pipeline: it can match on TAG_TYPE in the key, + * and it can modify the classified VID in the action. Thus, for each port + * under a vlan_filtering bridge, we can insert a rule in VCAP IS1 lookup 0 to + * match on 802.1ad tagged frames and modify their classified VID to the 802.1Q + * PVID of the port. This effectively makes it appear to the outside world as + * if those packets were processed as VLAN-untagged. + * + * The rule needs to be updated each time the bridge PVID changes, and needs + * to be deleted if the bridge PVID is deleted, or if the port becomes + * VLAN-unaware. + */ +static int ocelot_update_vlan_reclassify_rule(struct ocelot *ocelot, int port) +{ + unsigned long cookie = OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port); + struct ocelot_vcap_block *block_vcap_is1 = &ocelot->block[VCAP_IS1]; + struct ocelot_port *ocelot_port = ocelot->ports[port]; + const struct ocelot_bridge_vlan *pvid_vlan; + struct ocelot_vcap_filter *filter; + int err, val, pcp, dei; + bool vid_replace_ena; + u16 vid; + + pvid_vlan = ocelot_port->pvid_vlan; + vid_replace_ena = ocelot_port->vlan_aware && pvid_vlan; + + filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, cookie, + false); + if (!vid_replace_ena) { + /* If the reclassification filter doesn't need to exist, delete + * it if it was previously installed, and exit doing nothing + * otherwise. + */ + if (filter) + return ocelot_vcap_filter_del(ocelot, filter); + + return 0; + } + + /* The reclassification rule must apply. See if it already exists + * or if it must be created. + */ + + /* Treating as VLAN-untagged means using as classified VID equal to + * the bridge PVID, and PCP/DEI set to the port default QoS values. 
+ */ + vid = pvid_vlan->vid; + val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port); + pcp = ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val); + dei = !!(val & ANA_PORT_QOS_CFG_DP_DEFAULT_VAL); + + if (filter) { + bool changed = false; + + /* Filter exists, just update it */ + if (filter->action.vid != vid) { + filter->action.vid = vid; + changed = true; + } + if (filter->action.pcp != pcp) { + filter->action.pcp = pcp; + changed = true; + } + if (filter->action.dei != dei) { + filter->action.dei = dei; + changed = true; + } + + if (!changed) + return 0; + + return ocelot_vcap_filter_replace(ocelot, filter); + } + + /* Filter doesn't exist, create it */ + filter = kzalloc(sizeof(*filter), GFP_KERNEL); + if (!filter) + return -ENOMEM; + + filter->key_type = OCELOT_VCAP_KEY_ANY; + filter->ingress_port_mask = BIT(port); + filter->vlan.tpid = OCELOT_VCAP_BIT_1; + filter->prio = 1; + filter->id.cookie = cookie; + filter->id.tc_offload = false; + filter->block_id = VCAP_IS1; + filter->type = OCELOT_VCAP_FILTER_OFFLOAD; + filter->lookup = 0; + filter->action.vid_replace_ena = true; + filter->action.pcp_dei_ena = true; + filter->action.vid = vid; + filter->action.pcp = pcp; + filter->action.dei = dei; + + err = ocelot_vcap_filter_add(ocelot, filter, NULL); + if (err) + kfree(filter); + + return err; +} + /* Default vlan to clasify for untagged frames (may be zero) */ -static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, - const struct ocelot_bridge_vlan *pvid_vlan) +static int ocelot_port_set_pvid(struct ocelot *ocelot, int port, + const struct ocelot_bridge_vlan *pvid_vlan) { struct ocelot_port *ocelot_port = ocelot->ports[port]; u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge); @@ -438,15 +587,23 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, * happens automatically), but also 802.1p traffic which gets * classified to VLAN 0, but that is always in our RX filter, so it * would get accepted were it not for this setting. + * + * Also, we only support the bridge 802.1Q VLAN protocol, so + * 802.1ad-tagged frames (carrying S-Tags) should be considered + * 802.1Q-untagged, and also dropped. 
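To make the reclassification rationale above concrete, the following standalone sketch (not driver code; struct frame, the TPID constants and both helper functions are invented for illustration) contrasts the VID picked by the hardware's basic VLAN classification flow with the VID a vlan_protocol 802.1Q bridge expects for a frame whose outer tag is 802.1ad:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TPID_8021Q  0x8100
#define TPID_8021AD 0x88a8

struct frame {                          /* hypothetical parsed frame */
        bool has_outer_tag;
        uint16_t outer_tpid, outer_vid;
};

/* Hardware default: any recognized tag (C-Tag or S-Tag) selects the VID. */
static uint16_t hw_classified_vid(const struct frame *f, uint16_t pvid)
{
        if (f->has_outer_tag &&
            (f->outer_tpid == TPID_8021Q || f->outer_tpid == TPID_8021AD))
                return f->outer_vid ? f->outer_vid : pvid;
        return pvid;
}

/* vlan_protocol 802.1Q bridge: only C-Tags count as VLAN tags. */
static uint16_t bridge_expected_vid(const struct frame *f, uint16_t pvid)
{
        if (f->has_outer_tag && f->outer_tpid == TPID_8021Q && f->outer_vid)
                return f->outer_vid;
        return pvid;                    /* 802.1ad-tagged is VLAN-untagged */
}

int main(void)
{
        struct frame f = { true, TPID_8021AD, 3 };
        uint16_t pvid = 1;

        printf("hw picks VID %u, bridge expects VID %u\n",
               (unsigned)hw_classified_vid(&f, pvid),
               (unsigned)bridge_expected_vid(&f, pvid));
        return 0;
}

With an outer 802.1ad tag carrying VID 3 and a port PVID of 1, the unassisted hardware flow classifies to VID 3 while the bridge expects VID 1; the VCAP IS1 rule installed above rewrites the classified VID to close exactly this gap.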
*/ if (!pvid_vlan && ocelot_port->vlan_aware) val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | - ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA; + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA; ocelot_rmw_gix(ocelot, val, ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA | - ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA, + ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA | + ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA, ANA_PORT_DROP_CFG, port); + + return ocelot_update_vlan_reclassify_rule(ocelot, port); } static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot, @@ -594,7 +751,10 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M, ANA_PORT_VLAN_CFG, port); - ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan); + err = ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan); + if (err) + return err; + ocelot_port_manage_port_tag(ocelot, port); return 0; @@ -633,6 +793,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare); int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, bool untagged) { + struct ocelot_port *ocelot_port = ocelot->ports[port]; int err; /* Ignore VID 0 added to our RX filter by the 8021q module, since @@ -647,9 +808,17 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, return err; /* Default ingress vlan classification */ - if (pvid) - ocelot_port_set_pvid(ocelot, port, - ocelot_bridge_vlan_find(ocelot, vid)); + if (pvid) { + err = ocelot_port_set_pvid(ocelot, port, + ocelot_bridge_vlan_find(ocelot, vid)); + if (err) + return err; + } else if (ocelot_port->pvid_vlan && + ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) { + err = ocelot_port_set_pvid(ocelot, port, NULL); + if (err) + return err; + } /* Untagged egress vlan clasification */ ocelot_port_manage_port_tag(ocelot, port); @@ -675,8 +844,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) return err; /* Ingress */ - if (del_pvid) - ocelot_port_set_pvid(ocelot, port, NULL); + if (del_pvid) { + err = ocelot_port_set_pvid(ocelot, port, NULL); + if (err) + return err; + } /* Egress */ ocelot_port_manage_port_tag(ocelot, port); @@ -1161,7 +1333,7 @@ void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port, memset(ifh, 0, OCELOT_TAG_LEN); ocelot_ifh_set_bypass(ifh, 1); - ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports)); + ocelot_ifh_set_src(ifh, ocelot->num_phys_ports); ocelot_ifh_set_dest(ifh, BIT_ULL(port)); ocelot_ifh_set_qos_class(ifh, qos_class); ocelot_ifh_set_tag_type(ifh, tag_type); @@ -2502,7 +2674,7 @@ int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio) ANA_PORT_QOS_CFG, port); - return 0; + return ocelot_update_vlan_reclassify_rule(ocelot, port); } EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio); diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c index cb32234a5bf1..34a2d8ea3b2d 100644 --- a/drivers/net/ethernet/mscc/ocelot_ptp.c +++ b/drivers/net/ethernet/mscc/ocelot_ptp.c @@ -14,6 +14,8 @@ #include #include "ocelot.h" +#define OCELOT_PTP_TX_TSTAMP_TIMEOUT (5 * HZ) + int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts) { struct ocelot *ocelot = container_of(ptp, struct ocelot, ptp_info); @@ -495,6 +497,28 @@ static int ocelot_traps_to_ptp_rx_filter(unsigned int proto) return HWTSTAMP_FILTER_NONE; } +static int ocelot_ptp_tx_type_to_cmd(int tx_type, int *ptp_cmd) +{ + switch (tx_type) { + case HWTSTAMP_TX_ON: + *ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; + break; + case 
HWTSTAMP_TX_ONESTEP_SYNC: + /* IFH_REW_OP_ONE_STEP_PTP updates the correctionField, + * what we need to update is the originTimestamp. + */ + *ptp_cmd = IFH_REW_OP_ORIGIN_PTP; + break; + case HWTSTAMP_TX_OFF: + *ptp_cmd = 0; + break; + default: + return -ERANGE; + } + + return 0; +} + int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr) { struct ocelot_port *ocelot_port = ocelot->ports[port]; @@ -521,30 +545,19 @@ EXPORT_SYMBOL(ocelot_hwstamp_get); int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) { struct ocelot_port *ocelot_port = ocelot->ports[port]; + int ptp_cmd, old_ptp_cmd = ocelot_port->ptp_cmd; bool l2 = false, l4 = false; struct hwtstamp_config cfg; + bool old_l2, old_l4; int err; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) return -EFAULT; /* Tx type sanity check */ - switch (cfg.tx_type) { - case HWTSTAMP_TX_ON: - ocelot_port->ptp_cmd = IFH_REW_OP_TWO_STEP_PTP; - break; - case HWTSTAMP_TX_ONESTEP_SYNC: - /* IFH_REW_OP_ONE_STEP_PTP updates the correctional field, we - * need to update the origin time. - */ - ocelot_port->ptp_cmd = IFH_REW_OP_ORIGIN_PTP; - break; - case HWTSTAMP_TX_OFF: - ocelot_port->ptp_cmd = 0; - break; - default: - return -ERANGE; - } + err = ocelot_ptp_tx_type_to_cmd(cfg.tx_type, &ptp_cmd); + if (err) + return err; switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: @@ -569,13 +582,27 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) return -ERANGE; } + old_l2 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L2; + old_l4 = ocelot_port->trap_proto & OCELOT_PROTO_PTP_L4; + err = ocelot_setup_ptp_traps(ocelot, port, l2, l4); if (err) return err; + ocelot_port->ptp_cmd = ptp_cmd; + cfg.rx_filter = ocelot_traps_to_ptp_rx_filter(ocelot_port->trap_proto); - return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; + if (copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg))) { + err = -EFAULT; + goto out_restore_ptp_traps; + } + + return 0; +out_restore_ptp_traps: + ocelot_setup_ptp_traps(ocelot, port, old_l2, old_l4); + ocelot_port->ptp_cmd = old_ptp_cmd; + return err; } EXPORT_SYMBOL(ocelot_hwstamp_set); @@ -607,34 +634,87 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port, } EXPORT_SYMBOL(ocelot_get_ts_info); -static int ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, +static struct sk_buff *ocelot_port_dequeue_ptp_tx_skb(struct ocelot *ocelot, + int port, u8 ts_id, + u32 seqid) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct sk_buff *skb, *skb_tmp, *skb_match = NULL; + struct ptp_header *hdr; + + spin_lock(&ocelot->ts_id_lock); + + skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) { + if (OCELOT_SKB_CB(skb)->ts_id != ts_id) + continue; + + /* Check that the timestamp ID is for the expected PTP + * sequenceId. We don't have to test ptp_parse_header() against + * NULL, because we've pre-validated the packet's ptp_class. 
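The reworked ocelot_hwstamp_set() above snapshots the old trap protocol flags and the old ptp_cmd before applying the new configuration, then rolls both back if the final copy_to_user() fails. A minimal standalone sketch of that save/apply/roll-back idiom (all names and the simulated failure are invented for illustration):

#include <stdio.h>

struct cfg { int l2, l4, cmd; };

static int apply_traps(struct cfg *c, int l2, int l4)
{
        c->l2 = l2;                     /* pretend the hardware accepted it */
        c->l4 = l4;
        return 0;
}

static int report_to_user(int fail)
{
        return fail ? -1 : 0;           /* stand-in for copy_to_user() */
}

static int set_hwtstamp(struct cfg *c, int l2, int l4, int cmd, int fail)
{
        /* Snapshot the current state before touching anything. */
        int old_l2 = c->l2, old_l4 = c->l4, old_cmd = c->cmd;
        int err;

        err = apply_traps(c, l2, l4);
        if (err)
                return err;
        c->cmd = cmd;

        err = report_to_user(fail);
        if (err) {
                /* The last step failed: undo both earlier side effects. */
                apply_traps(c, old_l2, old_l4);
                c->cmd = old_cmd;
        }
        return err;
}

int main(void)
{
        struct cfg c = { 0, 0, 0 };

        set_hwtstamp(&c, 1, 1, 2, 1 /* simulate copy_to_user() failure */);
        printf("l2=%d l4=%d cmd=%d\n", c.l2, c.l4, c.cmd);   /* all still 0 */
        return 0;
}

The ordering matters: the only step that can fail after the device state has been touched is the report back to user space, so undoing the two earlier side effects leaves the port exactly as it was.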
+ */ + hdr = ptp_parse_header(skb, OCELOT_SKB_CB(skb)->ptp_class); + if (seqid != ntohs(hdr->sequence_id)) + continue; + + __skb_unlink(skb, &ocelot_port->tx_skbs); + ocelot->ptp_skbs_in_flight--; + skb_match = skb; + break; + } + + spin_unlock(&ocelot->ts_id_lock); + + return skb_match; +} + +static int ocelot_port_queue_ptp_tx_skb(struct ocelot *ocelot, int port, struct sk_buff *clone) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - unsigned long flags; + DECLARE_BITMAP(ts_id_in_flight, OCELOT_MAX_PTP_ID); + struct sk_buff *skb, *skb_tmp; + unsigned long n; - spin_lock_irqsave(&ocelot->ts_id_lock, flags); + spin_lock(&ocelot->ts_id_lock); - if (ocelot_port->ptp_skbs_in_flight == OCELOT_MAX_PTP_ID || - ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) { - spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); + /* To get a better chance of acquiring a timestamp ID, first flush the + * stale packets still waiting in the TX timestamping queue. They are + * probably lost. + */ + skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) { + if (time_before(OCELOT_SKB_CB(skb)->ptp_tx_time + + OCELOT_PTP_TX_TSTAMP_TIMEOUT, jiffies)) { + dev_warn_ratelimited(ocelot->dev, + "port %d invalidating stale timestamp ID %u which seems lost\n", + port, OCELOT_SKB_CB(skb)->ts_id); + __skb_unlink(skb, &ocelot_port->tx_skbs); + kfree_skb(skb); + ocelot->ptp_skbs_in_flight--; + } else { + __set_bit(OCELOT_SKB_CB(skb)->ts_id, ts_id_in_flight); + } + } + + if (ocelot->ptp_skbs_in_flight == OCELOT_PTP_FIFO_SIZE) { + spin_unlock(&ocelot->ts_id_lock); return -EBUSY; } - skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS; - /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ - OCELOT_SKB_CB(clone)->ts_id = ocelot_port->ts_id; + n = find_first_zero_bit(ts_id_in_flight, OCELOT_MAX_PTP_ID); + if (n == OCELOT_MAX_PTP_ID) { + spin_unlock(&ocelot->ts_id_lock); + return -EBUSY; + } - ocelot_port->ts_id++; - if (ocelot_port->ts_id == OCELOT_MAX_PTP_ID) - ocelot_port->ts_id = 0; - - ocelot_port->ptp_skbs_in_flight++; + /* Found an available timestamp ID, use it */ + OCELOT_SKB_CB(clone)->ts_id = n; + OCELOT_SKB_CB(clone)->ptp_tx_time = jiffies; ocelot->ptp_skbs_in_flight++; + __skb_queue_tail(&ocelot_port->tx_skbs, clone); - skb_queue_tail(&ocelot_port->tx_skbs, clone); + spin_unlock(&ocelot->ts_id_lock); - spin_unlock_irqrestore(&ocelot->ts_id_lock, flags); + dev_dbg_ratelimited(ocelot->dev, "port %d timestamp id %lu\n", port, n); return 0; } @@ -691,10 +771,14 @@ int ocelot_port_txtstamp_request(struct ocelot *ocelot, int port, if (!(*clone)) return -ENOMEM; - err = ocelot_port_add_txtstamp_skb(ocelot, port, *clone); - if (err) + /* Store timestamp ID in OCELOT_SKB_CB(clone)->ts_id */ + err = ocelot_port_queue_ptp_tx_skb(ocelot, port, *clone); + if (err) { + kfree_skb(*clone); return err; + } + skb_shinfo(*clone)->tx_flags |= SKBTX_IN_PROGRESS; OCELOT_SKB_CB(skb)->ptp_cmd = ptp_cmd; OCELOT_SKB_CB(*clone)->ptp_class = ptp_class; } @@ -730,28 +814,15 @@ static void ocelot_get_hwtimestamp(struct ocelot *ocelot, spin_unlock_irqrestore(&ocelot->ptp_clock_lock, flags); } -static bool ocelot_validate_ptp_skb(struct sk_buff *clone, u16 seqid) -{ - struct ptp_header *hdr; - - hdr = ptp_parse_header(clone, OCELOT_SKB_CB(clone)->ptp_class); - if (WARN_ON(!hdr)) - return false; - - return seqid == ntohs(hdr->sequence_id); -} - void ocelot_get_txtstamp(struct ocelot *ocelot) { int budget = OCELOT_PTP_QUEUE_SZ; while (budget--) { - struct sk_buff *skb, *skb_tmp, *skb_match = NULL; struct skb_shared_hwtstamps shhwtstamps; 
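ocelot_port_queue_ptp_tx_skb() above no longer hands out sequential timestamp IDs: it first expires queued clones older than OCELOT_PTP_TX_TSTAMP_TIMEOUT, then builds a bitmap of the IDs still in flight and takes the lowest free one. A standalone sketch of that allocation strategy, using a plain 64-bit mask as a stand-in for DECLARE_BITMAP()/find_first_zero_bit() and invented types (this assumes the ID space fits in 64 bits):

#include <stdint.h>
#include <stdio.h>

#define MAX_IDS 63                      /* stand-in for OCELOT_MAX_PTP_ID */

struct pending { int id; long queued_at; };

/* Return the lowest ID not held by a non-stale pending entry, or -1. */
static int alloc_lowest_free_id(const struct pending *p, int n,
                                long now, long timeout)
{
        uint64_t in_flight = 0;
        int i;

        for (i = 0; i < n; i++) {
                if (now - p[i].queued_at > timeout)
                        continue;       /* stale: its ID is considered free */
                in_flight |= 1ULL << p[i].id;
        }

        for (i = 0; i < MAX_IDS; i++)   /* find_first_zero_bit() equivalent */
                if (!(in_flight & (1ULL << i)))
                        return i;
        return -1;
}

int main(void)
{
        struct pending p[] = { { 0, 100 }, { 1, 500 }, { 3, 510 } };

        /* ID 0 timed out, so it is handed out again before ID 2. */
        printf("%d\n", alloc_lowest_free_id(p, 3, 600, 300));   /* prints 0 */
        return 0;
}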
u32 val, id, seqid, txport; - struct ocelot_port *port; + struct sk_buff *skb_match; struct timespec64 ts; - unsigned long flags; val = ocelot_read(ocelot, SYS_PTP_STATUS); @@ -766,36 +837,14 @@ void ocelot_get_txtstamp(struct ocelot *ocelot) txport = SYS_PTP_STATUS_PTP_MESS_TXPORT_X(val); seqid = SYS_PTP_STATUS_PTP_MESS_SEQ_ID(val); - port = ocelot->ports[txport]; - - spin_lock(&ocelot->ts_id_lock); - port->ptp_skbs_in_flight--; - ocelot->ptp_skbs_in_flight--; - spin_unlock(&ocelot->ts_id_lock); - /* Retrieve its associated skb */ -try_again: - spin_lock_irqsave(&port->tx_skbs.lock, flags); - - skb_queue_walk_safe(&port->tx_skbs, skb, skb_tmp) { - if (OCELOT_SKB_CB(skb)->ts_id != id) - continue; - __skb_unlink(skb, &port->tx_skbs); - skb_match = skb; - break; - } - - spin_unlock_irqrestore(&port->tx_skbs.lock, flags); - - if (WARN_ON(!skb_match)) - continue; - - if (!ocelot_validate_ptp_skb(skb_match, seqid)) { - dev_err_ratelimited(ocelot->dev, - "port %d received stale TX timestamp for seqid %d, discarding\n", - txport, seqid); - dev_kfree_skb_any(skb); - goto try_again; + skb_match = ocelot_port_dequeue_ptp_tx_skb(ocelot, txport, id, + seqid); + if (!skb_match) { + dev_warn_ratelimited(ocelot->dev, + "port %d received TX timestamp (seqid %d, ts id %u) for packet previously declared stale\n", + txport, seqid, id); + goto next_ts; } /* Get the h/w timestamp */ @@ -806,7 +855,7 @@ try_again: shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); skb_complete_tx_timestamp(skb_match, &shhwtstamps); - /* Next ts */ +next_ts: ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT); } } diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c index 73cdec5ca6a3..5734b86aed5b 100644 --- a/drivers/net/ethernet/mscc/ocelot_vcap.c +++ b/drivers/net/ethernet/mscc/ocelot_vcap.c @@ -695,6 +695,7 @@ static void is1_entry_set(struct ocelot *ocelot, int ix, vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc); vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc); vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged); + vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TPID, tag->tpid); vcap_key_set(vcap, &data, VCAP_IS1_HK_VID, tag->vid.value, tag->vid.mask); vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c index 2ec62c8d86e1..59486fe2ad18 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c @@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size) struct sk_buff *skb; skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL); + if (!skb) + return NULL; skb_put(skb, size); return skb; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index 9d97cd281f18..c03558adda91 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, map_id_full = be64_to_cpu(cbe->map_ptr); map_id = map_id_full; - if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) + if (size_add(pkt_size, data_size) > INT_MAX || + len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) return -EINVAL; if (cbe->hdr.ver != NFP_CCM_ABI_VERSION) return -EINVAL; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c 
index d7370fb60a16..928ef2933990 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -828,8 +828,8 @@ static int ionic_get_module_eeprom(struct net_device *netdev, len = min_t(u32, sizeof(xcvr->sprom), ee->len); do { - memcpy(data, xcvr->sprom, len); - memcpy(tbuf, xcvr->sprom, len); + memcpy(data, &xcvr->sprom[ee->offset], len); + memcpy(tbuf, &xcvr->sprom[ee->offset], len); /* Let's make sure we got a consistent copy */ if (!memcmp(data, tbuf, len)) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 14865fc245da..b746944bcd2a 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -3484,8 +3484,8 @@ int ionic_lif_register(struct ionic_lif *lif) /* only register LIF0 for now */ err = register_netdev(lif->netdev); if (err) { - dev_err(lif->ionic->dev, "Cannot register net device, aborting\n"); - ionic_lif_unregister_phc(lif); + dev_err(lif->ionic->dev, "Cannot register net device: %d, aborting\n", err); + ionic_lif_unregister(lif); return err; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 16e6bd466143..6218d9c26855 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -3314,7 +3314,9 @@ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, if (rc) return rc; - if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) + if (((rsp & FW_MSG_CODE_MASK) == FW_MSG_CODE_UNSUPPORTED)) + rc = -EOPNOTSUPP; + else if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) rc = -EINVAL; return rc; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index dc43e74147fb..4bc950d36607 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -204,7 +204,7 @@ static struct pci_driver qede_pci_driver = { }; static struct qed_eth_cb_ops qede_ll_ops = { - { + .common = { #ifdef CONFIG_RFS_ACCEL .arfs_filter_op = qede_arfs_filter_op, #endif diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index c9c8225f04d6..747cc5e2bb78 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -28,16 +28,19 @@ struct qede_ptp { }; /** - * qede_ptp_adjfreq() - Adjust the frequency of the PTP cycle counter. + * qede_ptp_adjfine() - Adjust the frequency of the PTP cycle counter. * * @info: The PTP clock info structure. - * @ppb: Parts per billion adjustment from base. + * @scaled_ppm: Scaled parts per million adjustment from base. + * + * Scaled parts per million is ppm with a 16-bit binary fractional field. * * Return: Zero on success, negative errno otherwise. 
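As the kernel-doc above states, scaled_ppm is parts per million with a 16-bit binary fractional field, so the conversion behind this and the other adjfreq-to-adjfine switches in this patch is ppb = scaled_ppm * 1000 / 2^16, i.e. (scaled_ppm * 125) >> 13. A standalone sketch of the arithmetic (the in-kernel scaled_ppm_to_ppb() helper also applies a small rounding correction, omitted here):

#include <stdint.h>
#include <stdio.h>

/* scaled_ppm has a 16-bit binary fraction:
 *   ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13
 */
static long scaled_ppm_to_ppb_sketch(long scaled_ppm)
{
        int64_t ppb = scaled_ppm;

        ppb *= 125;
        ppb >>= 13;
        return (long)ppb;
}

int main(void)
{
        /* 1 ppm expressed as scaled_ppm is 1 << 16 = 65536 */
        printf("%ld ppb\n", scaled_ppm_to_ppb_sketch(65536));   /* 1000 */
        return 0;
}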
*/ -static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb) +static int qede_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm) { struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info); + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); struct qede_dev *edev = ptp->edev; int rc; @@ -47,7 +50,7 @@ static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb) rc = ptp->ops->adjfreq(edev->cdev, ppb); spin_unlock_bh(&ptp->lock); } else { - DP_ERR(edev, "PTP adjfreq called while interface is down\n"); + DP_ERR(edev, "PTP adjfine called while interface is down\n"); rc = -EFAULT; } __qede_unlock(edev); @@ -462,7 +465,7 @@ int qede_ptp_enable(struct qede_dev *edev) ptp->clock_info.n_ext_ts = 0; ptp->clock_info.n_per_out = 0; ptp->clock_info.pps = 0; - ptp->clock_info.adjfreq = qede_ptp_adjfreq; + ptp->clock_info.adjfine = qede_ptp_adjfine; ptp->clock_info.adjtime = qede_ptp_adjtime; ptp->clock_info.gettime64 = qede_ptp_gettime; ptp->clock_info.settime64 = qede_ptp_settime; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index f9dd50152b1e..d57b976b9040 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -454,8 +454,10 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter, num_vlans = sriov->num_allowed_vlans; sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL); - if (!sriov->allowed_vlans) + if (!sriov->allowed_vlans) { + qlcnic_sriov_free_vlans(adapter); return -ENOMEM; + } vlans = (u16 *)&cmd->rsp.arg[3]; for (i = 0; i < num_vlans; i++) @@ -1482,8 +1484,11 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_o } cmd_op = (cmd.rsp.arg[0] & 0xff); - if (cmd.rsp.arg[0] >> 25 == 2) - return 2; + if (cmd.rsp.arg[0] >> 25 == 2) { + ret = 2; + goto out; + } + if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) set_bit(QLC_BC_VF_STATE, &vf->state); else @@ -2167,8 +2172,10 @@ int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter) vf = &sriov->vf_info[i]; vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans, sizeof(*vf->sriov_vlans), GFP_KERNEL); - if (!vf->sriov_vlans) + if (!vf->sriov_vlans) { + qlcnic_sriov_free_vlans(adapter); return -ENOMEM; + } } return 0; diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 926a087ae1c6..b7af824116ea 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -67,7 +67,7 @@ MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000." 
#define QCASPI_PLUGGABLE_MIN 0 #define QCASPI_PLUGGABLE_MAX 1 -static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN; +static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX; module_param(qcaspi_pluggable, int, 0); MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no)."); @@ -829,7 +829,6 @@ qcaspi_netdev_init(struct net_device *dev) dev->mtu = QCAFRM_MAX_MTU; dev->type = ARPHRD_ETHER; - qca->clkspeed = qcaspi_clkspeed; qca->burst_len = qcaspi_burst_len; qca->spi_thread = NULL; qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN + @@ -918,17 +917,15 @@ qca_spi_probe(struct spi_device *spi) legacy_mode = of_property_read_bool(spi->dev.of_node, "qca,legacy-mode"); - if (qcaspi_clkspeed == 0) { - if (spi->max_speed_hz) - qcaspi_clkspeed = spi->max_speed_hz; - else - qcaspi_clkspeed = QCASPI_CLK_SPEED; - } + if (qcaspi_clkspeed) + spi->max_speed_hz = qcaspi_clkspeed; + else if (!spi->max_speed_hz) + spi->max_speed_hz = QCASPI_CLK_SPEED; - if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || - (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { - dev_err(&spi->dev, "Invalid clkspeed: %d\n", - qcaspi_clkspeed); + if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN || + spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) { + dev_err(&spi->dev, "Invalid clkspeed: %u\n", + spi->max_speed_hz); return -EINVAL; } @@ -953,14 +950,13 @@ qca_spi_probe(struct spi_device *spi) return -EINVAL; } - dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n", + dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n", QCASPI_DRV_VERSION, - qcaspi_clkspeed, + spi->max_speed_hz, qcaspi_burst_len, qcaspi_pluggable); spi->mode = SPI_MODE_3; - spi->max_speed_hz = qcaspi_clkspeed; if (spi_setup(spi) < 0) { dev_err(&spi->dev, "Unable to setup SPI device\n"); return -EFAULT; diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index 58ad910068d4..b3b17bd46e12 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -101,7 +101,6 @@ struct qcaspi { #endif /* user configurable options */ - u32 clkspeed; u8 legacy_mode; u16 burst_len; }; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 75836c0e91cf..676de89cefac 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4140,8 +4140,8 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, { unsigned int padto = 0, len = skb->len; - if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN && - rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) { + if (len < 128 + RTL_MIN_PATCH_LEN && rtl_skb_is_udp(skb) && + skb_transport_header_was_set(skb)) { unsigned int trans_data_len = skb_tail_pointer(skb) - skb_transport_header(skb); @@ -4165,9 +4165,15 @@ static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, struct sk_buff *skb) { - unsigned int padto; + unsigned int padto = 0; - padto = rtl8125_quirk_udp_padto(tp, skb); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_61 ... 
RTL_GIGA_MAC_VER_63: + padto = rtl8125_quirk_udp_padto(tp, skb); + break; + default: + break; + } switch (tp->mac_version) { case RTL_GIGA_MAC_VER_34: @@ -5151,6 +5157,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp) new_bus->priv = tp; new_bus->parent = &pdev->dev; new_bus->irq[0] = PHY_MAC_INTERRUPT; + new_bus->phy_mask = GENMASK(31, 1); snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", pci_domain_nr(pdev->bus), pci_dev_id(pdev)); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 14dc5833c465..5f4297e83aaf 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3473,10 +3473,12 @@ static int sh_eth_suspend(struct device *dev) netif_device_detach(ndev); + rtnl_lock(); if (mdp->wol_enabled) ret = sh_eth_wol_setup(ndev); else ret = sh_eth_close(ndev); + rtnl_unlock(); return ret; } @@ -3490,10 +3492,12 @@ static int sh_eth_resume(struct device *dev) if (!netif_running(ndev)) return 0; + rtnl_lock(); if (mdp->wol_enabled) ret = sh_eth_wol_restore(ndev); else ret = sh_eth_open(ndev); + rtnl_unlock(); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 9e59669a93dd..2e2826c901fc 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2504,7 +2504,7 @@ static void rocker_carrier_init(const struct rocker_port *rocker_port) u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); bool link_up; - link_up = link_status & (1 << rocker_port->pport); + link_up = link_status & (1ULL << rocker_port->pport); if (link_up) netif_carrier_on(rocker_port->dev); else diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 692c7f132e9f..4eb6234292b1 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -351,7 +351,7 @@ struct efx_ptp_data { void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb); }; -static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta); +static int efx_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm); static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta); static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts); static int efx_phc_settime(struct ptp_clock_info *ptp, @@ -1509,7 +1509,7 @@ static const struct ptp_clock_info efx_phc_clock_info = { .n_per_out = 0, .n_pins = 0, .pps = 1, - .adjfreq = efx_phc_adjfreq, + .adjfine = efx_phc_adjfine, .adjtime = efx_phc_adjtime, .gettime64 = efx_phc_gettime, .settime64 = efx_phc_settime, @@ -2138,11 +2138,12 @@ void __efx_rx_skb_attach_timestamp(struct efx_channel *channel, ptp->ts_corrections.general_rx); } -static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +static int efx_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct efx_ptp_data *ptp_data = container_of(ptp, struct efx_ptp_data, phc_clock_info); + s32 delta = scaled_ppm_to_ppb(scaled_ppm); struct efx_nic *efx = ptp_data->efx; MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN); s64 adjustment_ns; diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c index 7c46752e6eae..38e666561bcd 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.c +++ b/drivers/net/ethernet/sfc/siena/ptp.c @@ -347,7 +347,7 @@ struct efx_ptp_data { void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb); }; -static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta); +static int efx_phc_adjfine(struct 
ptp_clock_info *ptp, long scaled_ppm); static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta); static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts); static int efx_phc_settime(struct ptp_clock_info *ptp, @@ -1429,7 +1429,7 @@ static const struct ptp_clock_info efx_phc_clock_info = { .n_per_out = 0, .n_pins = 0, .pps = 1, - .adjfreq = efx_phc_adjfreq, + .adjfine = efx_phc_adjfine, .adjtime = efx_phc_adjtime, .gettime64 = efx_phc_gettime, .settime64 = efx_phc_settime, @@ -2044,11 +2044,12 @@ void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel, ptp->ts_corrections.general_rx); } -static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +static int efx_phc_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct efx_ptp_data *ptp_data = container_of(ptp, struct efx_ptp_data, phc_clock_info); + s32 delta = scaled_ppm_to_ppb(scaled_ppm); struct efx_nic *efx = ptp_data->efx; MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN); s64 adjustment_ns; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index 6b447d8f0bd8..9c726fe40acd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -426,6 +426,8 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = dwmac; plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; + plat_dat->riwt_off = 1; + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) goto err_remove_config_dt; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index f834472599f7..0921b78c6244 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -948,7 +948,7 @@ static int sun8i_dwmac_set_syscon(struct device *dev, /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY * address. No need to mask it again. */ - reg |= 1 << H3_EPHY_ADDR_SHIFT; + reg |= ret << H3_EPHY_ADDR_SHIFT; } else { /* For SoCs without internal PHY the PHY selection bit should be * set to 0 (external PHY). diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index be80ca6534ac..abd1077fdd40 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -8,6 +8,7 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#include #include #include #include @@ -293,62 +294,80 @@ out: } /** - * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources - * @plat: driver data platform structure - * @np: device tree node - * @dev: device pointer - * Description: - * The mdio bus will be allocated in case of a phy transceiver is on board; - * it will be NULL if the fixed-link is configured. - * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated - * in any case (for DSA, mdio must be registered even if fixed-link). - * The table below sums the supported configurations: - * ------------------------------- - * snps,phy-addr | Y - * ------------------------------- - * phy-handle | Y - * ------------------------------- - * fixed-link | N - * ------------------------------- - * snps,dwmac-mdio | - * even if | Y - * fixed-link | - * ------------------------------- + * stmmac_of_get_mdio() - Gets the MDIO bus from the devicetree. 
+ * @np: devicetree node * - * It returns 0 in case of success otherwise -ENODEV. + * The MDIO bus will be searched for in the following ways: + * 1. The compatible is "snps,dwc-qos-ethernet-4.10" && a "mdio" named + * child node exists + * 2. A child node with the "snps,dwmac-mdio" compatible is present + * + * Return: The MDIO node if present otherwise NULL */ -static int stmmac_dt_phy(struct plat_stmmacenet_data *plat, - struct device_node *np, struct device *dev) +static struct device_node *stmmac_of_get_mdio(struct device_node *np) { - bool mdio = !of_phy_is_fixed_link(np); static const struct of_device_id need_mdio_ids[] = { { .compatible = "snps,dwc-qos-ethernet-4.10" }, {}, }; + struct device_node *mdio_node = NULL; if (of_match_node(need_mdio_ids, np)) { - plat->mdio_node = of_get_child_by_name(np, "mdio"); + mdio_node = of_get_child_by_name(np, "mdio"); } else { /** * If snps,dwmac-mdio is passed from DT, always register * the MDIO */ - for_each_child_of_node(np, plat->mdio_node) { - if (of_device_is_compatible(plat->mdio_node, + for_each_child_of_node(np, mdio_node) { + if (of_device_is_compatible(mdio_node, "snps,dwmac-mdio")) break; } } - if (plat->mdio_node) { - dev_dbg(dev, "Found MDIO subnode\n"); - mdio = true; - } + return mdio_node; +} - if (mdio) { - plat->mdio_bus_data = - devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data), - GFP_KERNEL); +/** + * stmmac_mdio_setup() - Populate platform related MDIO structures. + * @plat: driver data platform structure + * @np: devicetree node + * @dev: device pointer + * + * This searches for MDIO information from the devicetree. + * If an MDIO node is found, it's assigned to plat->mdio_node and + * plat->mdio_bus_data is allocated. + * If no connection can be determined, just plat->mdio_bus_data is allocated + * to indicate a bus should be created and scanned for a phy. + * If it's determined there's no MDIO bus needed, both are left NULL. + * + * This expects that plat->phy_node has already been searched for. + * + * Return: 0 on success, errno otherwise. + */ +static int stmmac_mdio_setup(struct plat_stmmacenet_data *plat, + struct device_node *np, struct device *dev) +{ + bool legacy_mdio; + + plat->mdio_node = stmmac_of_get_mdio(np); + if (plat->mdio_node) + dev_dbg(dev, "Found MDIO subnode\n"); + + /* Legacy devicetrees allowed for no MDIO bus description and expect + * the bus to be scanned for devices. If there's no phy or fixed-link + * described assume this is the case since there must be something + * connected to the MAC. 
+ */ + legacy_mdio = !of_phy_is_fixed_link(np) && !plat->phy_node; + if (legacy_mdio) + dev_info(dev, "Deprecated MDIO bus assumption used\n"); + + if (plat->mdio_node || legacy_mdio) { + plat->mdio_bus_data = devm_kzalloc(dev, + sizeof(*plat->mdio_bus_data), + GFP_KERNEL); if (!plat->mdio_bus_data) return -ENOMEM; @@ -453,10 +472,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); - /* To Configure PHY by using all device-tree supported properties */ - rc = stmmac_dt_phy(plat, np, &pdev->dev); - if (rc) - return ERR_PTR(rc); + rc = stmmac_mdio_setup(plat, np, &pdev->dev); + if (rc) { + ret = ERR_PTR(rc); + goto error_put_phy; + } of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size); @@ -545,8 +565,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL); if (!dma_cfg) { - stmmac_remove_config_dt(pdev, plat); - return ERR_PTR(-ENOMEM); + ret = ERR_PTR(-ENOMEM); + goto error_put_mdio; } plat->dma_cfg = dma_cfg; @@ -577,8 +597,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) rc = stmmac_mtl_setup(pdev, plat); if (rc) { - stmmac_remove_config_dt(pdev, plat); - return ERR_PTR(rc); + ret = ERR_PTR(rc); + goto error_put_mdio; } /* clock setup */ @@ -630,10 +650,48 @@ error_hw_init: clk_disable_unprepare(plat->pclk); error_pclk_get: clk_disable_unprepare(plat->stmmac_clk); +error_put_mdio: + of_node_put(plat->mdio_node); +error_put_phy: + of_node_put(plat->phy_node); return ret; } +static void devm_stmmac_remove_config_dt(void *data) +{ + struct plat_stmmacenet_data *plat = data; + + clk_disable_unprepare(plat->stmmac_clk); + clk_disable_unprepare(plat->pclk); + of_node_put(plat->mdio_node); + of_node_put(plat->phy_node); +} + +/** + * devm_stmmac_probe_config_dt + * @pdev: platform_device structure + * @mac: MAC address to use + * Description: Devres variant of stmmac_probe_config_dt(). 
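devm_stmmac_probe_config_dt() above relies on devm_add_action_or_reset(), whose contract is that the registered cleanup runs automatically on device teardown, and runs immediately if the registration itself fails so the caller never leaks the resource. A standalone userspace analogue of that contract (all names are invented and a single action slot stands in for the real devres list):

#include <stdio.h>
#include <stdlib.h>

struct device {                         /* one-slot stand-in for devres */
        void (*action)(void *);
        void *data;
};

/* Mirrors devm_add_action_or_reset(): on registration failure the action
 * is executed right away, so the resource is released either way.
 */
static int add_action_or_reset(struct device *dev, void (*action)(void *),
                               void *data, int simulate_failure)
{
        if (simulate_failure) {
                action(data);
                return -1;
        }
        dev->action = action;
        dev->data = data;
        return 0;
}

static void device_teardown(struct device *dev)
{
        if (dev->action)
                dev->action(dev->data); /* runs automatically on remove */
}

static void free_config(void *data)
{
        printf("cleaning up config\n");
        free(data);
}

int main(void)
{
        struct device dev = { 0 };
        void *cfg = malloc(64);         /* stands in for the parsed config */

        if (!cfg)
                return 1;
        if (add_action_or_reset(&dev, free_config, cfg, 0))
                return 1;               /* cfg was already freed here */

        device_teardown(&dev);          /* driver remove: cleanup runs now */
        return 0;
}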
+ */ +struct plat_stmmacenet_data * +devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) +{ + struct plat_stmmacenet_data *plat; + int ret; + + plat = stmmac_probe_config_dt(pdev, mac); + if (IS_ERR(plat)) + return plat; + + ret = devm_add_action_or_reset(&pdev->dev, + devm_stmmac_remove_config_dt, plat); + if (ret) + return ERR_PTR(ret); + + return plat; +} + /** * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt() * @pdev: platform_device structure @@ -656,12 +714,19 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) return ERR_PTR(-EINVAL); } +struct plat_stmmacenet_data * +devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) +{ + return ERR_PTR(-EINVAL); +} + void stmmac_remove_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat) { } #endif /* CONFIG_OF */ EXPORT_SYMBOL_GPL(stmmac_probe_config_dt); +EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt); EXPORT_SYMBOL_GPL(stmmac_remove_config_dt); int stmmac_get_platform_resources(struct platform_device *pdev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index 3fff3f59d73d..2102c6d41464 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -13,6 +13,8 @@ struct plat_stmmacenet_data * stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); +struct plat_stmmacenet_data * +devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); void stmmac_remove_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat); diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 33df06a2de13..a0a9e4e13e77 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -1541,7 +1541,7 @@ void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common) for (i = 0; i < common->tx_ch_num; i++) { struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; - if (tx_chn->irq) + if (tx_chn->irq > 0) devm_free_irq(dev, tx_chn->irq, tx_chn); netif_napi_del(&tx_chn->napi_tx); @@ -1918,7 +1918,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) port->slave.mac_addr); if (!is_valid_ether_addr(port->slave.mac_addr)) { eth_random_addr(port->slave.mac_addr); - dev_err(dev, "Use random MAC address\n"); + dev_info(dev, "Use random MAC address\n"); } } } diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c index c1bdf045e981..d7ebc6e12143 100644 --- a/drivers/net/ethernet/ti/am65-cpts.c +++ b/drivers/net/ethernet/ti/am65-cpts.c @@ -381,9 +381,10 @@ static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id) } /* PTP clock operations */ -static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info); + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); int neg_adj = 0; u64 adj_period; u32 val; @@ -615,7 +616,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp); static struct ptp_clock_info am65_ptp_info = { .owner = THIS_MODULE, .name = "CTPS timer", - .adjfreq = am65_cpts_ptp_adjfreq, + .adjfine = am65_cpts_ptp_adjfine, .adjtime = am65_cpts_ptp_adjtime, .gettimex64 = am65_cpts_ptp_gettimex, .settime64 = am65_cpts_ptp_settime, diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c 
index 2647c18d40d9..3d42ca15e877 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -106,15 +106,15 @@ struct cpsw_ale_dev_id { static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) { - int idx, idx2; + int idx, idx2, index; u32 hi_val = 0; idx = start / 32; idx2 = (start + bits - 1) / 32; /* Check if bits to be fetched exceed a word */ if (idx != idx2) { - idx2 = 2 - idx2; /* flip */ - hi_val = ale_entry[idx2] << ((idx2 * 32) - start); + index = 2 - idx2; /* flip */ + hi_val = ale_entry[index] << ((idx2 * 32) - start); } start -= idx * 32; idx = 2 - idx; /* flip */ @@ -124,16 +124,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits, u32 value) { - int idx, idx2; + int idx, idx2, index; value &= BITMASK(bits); idx = start / 32; idx2 = (start + bits - 1) / 32; /* Check if bits to be set exceed a word */ if (idx != idx2) { - idx2 = 2 - idx2; /* flip */ - ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32))); - ale_entry[idx2] |= (value >> ((idx2 * 32) - start)); + index = 2 - idx2; /* flip */ + ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32))); + ale_entry[index] |= (value >> ((idx2 * 32) - start)); } start -= idx * 32; idx = 2 - idx; /* flip */ diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 6e70aa1cc7bf..42684cb83606 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -1411,6 +1411,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw) ndev->netdev_ops = &cpsw_netdev_ops; ndev->ethtool_ops = &cpsw_ethtool_ops; SET_NETDEV_DEV(ndev, dev); + ndev->dev.of_node = slave_data->slave_node; if (!napi_ndev) { /* CPSW Host port CPDMA interface is shared between diff --git a/drivers/net/ethernet/vertexcom/mse102x.c b/drivers/net/ethernet/vertexcom/mse102x.c index dd766e175f7d..060a566bc6aa 100644 --- a/drivers/net/ethernet/vertexcom/mse102x.c +++ b/drivers/net/ethernet/vertexcom/mse102x.c @@ -6,6 +6,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include @@ -33,7 +34,7 @@ #define CMD_CTR (0x2 << CMD_SHIFT) #define CMD_MASK GENMASK(15, CMD_SHIFT) -#define LEN_MASK GENMASK(CMD_SHIFT - 1, 0) +#define LEN_MASK GENMASK(CMD_SHIFT - 2, 0) #define DET_CMD_LEN 4 #define DET_SOF_LEN 2 @@ -262,7 +263,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp, } static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff, - unsigned int frame_len) + unsigned int frame_len, bool drop) { struct mse102x_net_spi *mses = to_mse102x_spi(mse); struct spi_transfer *xfer = &mses->spi_xfer; @@ -280,6 +281,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff, netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n", __func__, ret); mse->stats.xfer_err++; + } else if (drop) { + netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__); + ret = -EINVAL; } else if (*sof != cpu_to_be16(DET_SOF)) { netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n", __func__, *sof); @@ -307,6 +311,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) struct sk_buff *skb; unsigned int rxalign; unsigned int rxlen; + bool drop = false; __be16 rx = 0; u16 cmd_resp; u8 *rxpkt; @@ -329,7 +334,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n", __func__, cmd_resp); mse->stats.invalid_rts++; - return; + drop = true; + goto drop; } 
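The cpsw_ale_get_field()/cpsw_ale_set_field() fix above concerns fields that straddle two 32-bit words of an ALE entry whose words are stored most significant first; the bug was reusing idx2 both as the flipped array index and inside the shift amount. A standalone sketch of the corrected extraction (entry layout and test values are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define BITMASK(bits) ((1U << (bits)) - 1)

/* Extract 'bits' bits starting at bit 'start' from an entry stored as
 * three 32-bit words, most significant word first (entry bit 0 lives in
 * word[2], as in the CPSW ALE layout).
 */
static uint32_t ale_get_field(const uint32_t *entry, int start, int bits)
{
        int idx = start / 32;                 /* word holding the low bits  */
        int idx2 = (start + bits - 1) / 32;   /* word holding the high bits */
        uint32_t hi = 0;

        if (idx != idx2)                      /* field straddles two words  */
                hi = entry[2 - idx2] << ((idx2 * 32) - start);

        start -= idx * 32;
        return (hi | (entry[2 - idx] >> start)) & BITMASK(bits);
}

int main(void)
{
        /* Bits 28..35: the low nibble sits in entry word 0 (stored at [2]),
         * the high nibble in entry word 1 (stored at [1]).
         */
        uint32_t entry[3] = { 0x0, 0x00000005, 0xA0000000 };

        printf("0x%02x\n", ale_get_field(entry, 28, 8));   /* prints 0x5a */
        return 0;
}

Note the flipped index (2 - idx2) is only used to pick the word; the shift distance still uses the unflipped idx2, which is exactly what the patch restores.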
net_dbg_ratelimited("%s: Unexpected response to first CMD\n", @@ -337,12 +343,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) } rxlen = cmd_resp & LEN_MASK; - if (!rxlen) { - net_dbg_ratelimited("%s: No frame length defined\n", __func__); + if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) { + net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__, + rxlen); mse->stats.invalid_len++; - return; + drop = true; } + /* In case of an invalid CMD_RTS, the frame must be consumed anyway. + * So assume the maximum possible frame length. + */ +drop: + if (drop) + rxlen = VLAN_ETH_FRAME_LEN; + rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4); skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign); if (!skb) @@ -353,7 +367,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse) * They are copied, but ignored. */ rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN; - if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) { + if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) { mse->ndev->stats.rx_errors++; dev_kfree_skb(skb); return; @@ -437,13 +451,15 @@ static void mse102x_tx_work(struct work_struct *work) mse = &mses->mse102x; while ((txb = skb_dequeue(&mse->txq))) { + unsigned int len = max_t(unsigned int, txb->len, ETH_ZLEN); + mutex_lock(&mses->lock); ret = mse102x_tx_pkt_spi(mse, txb, work_timeout); mutex_unlock(&mses->lock); if (ret) { mse->ndev->stats.tx_dropped++; } else { - mse->ndev->stats.tx_bytes += txb->len; + mse->ndev->stats.tx_bytes += len; mse->ndev->stats.tx_packets++; } @@ -507,6 +523,7 @@ static irqreturn_t mse102x_irq(int irq, void *_mse) static int mse102x_net_open(struct net_device *ndev) { struct mse102x_net *mse = netdev_priv(ndev); + struct mse102x_net_spi *mses = to_mse102x_spi(mse); int ret; ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT, @@ -522,6 +539,13 @@ static int mse102x_net_open(struct net_device *ndev) netif_carrier_on(ndev); + /* The SPI interrupt can get stuck in case of pending packet(s). * So poll for possible packet(s) to re-arm the interrupt.
+ */ + mutex_lock(&mses->lock); + mse102x_rx_pkt_spi(mse); + mutex_unlock(&mses->lock); + netif_dbg(mse, ifup, ndev, "network device up\n"); return 0; diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index ce0dd78826af..f227ed8e9934 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1570,6 +1570,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev, return -EFAULT; } + if (ecoalesce->rx_max_coalesced_frames > 255 || + ecoalesce->tx_max_coalesced_frames > 255) { + NL_SET_ERR_MSG(extack, "frames must be less than 256"); + return -EINVAL; + } + if (ecoalesce->rx_max_coalesced_frames) lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; if (ecoalesce->rx_coalesce_usecs) @@ -2153,6 +2159,7 @@ static int axienet_probe(struct platform_device *pdev) lp->phylink_config.dev = &ndev->dev; lp->phylink_config.type = PHYLINK_NETDEV; + lp->phylink_config.mac_managed_pm = true; lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10FD | MAC_100FD | MAC_1000FD; diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index c4a49a75250e..3dd5c69b05cb 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -990,7 +990,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (geneve->cfg.df == GENEVE_DF_SET) { df = htons(IP_DF); } else if (geneve->cfg.df == GENEVE_DF_INHERIT) { - struct ethhdr *eth = eth_hdr(skb); + struct ethhdr *eth = skb_eth_hdr(skb); if (ntohs(eth->h_proto) == ETH_P_IPV6) { df = htons(IP_DF); @@ -1965,21 +1965,9 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *geneve, *next; - struct net_device *dev, *aux; - /* gather any geneve devices that were moved into this ns */ - for_each_netdev_safe(net, dev, aux) - if (dev->rtnl_link_ops == &geneve_link_ops) - unregister_netdevice_queue(dev, head); - - /* now gather any other geneve devices that were created in this ns */ - list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) { - /* If geneve->dev is in the same netns, it was already added - * to the list by the previous loop. 
- */ - if (!net_eq(dev_net(geneve->dev), net)) - unregister_netdevice_queue(geneve->dev, head); - } + list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) + geneve_dellink(geneve->dev, head); } static void __net_exit geneve_exit_batch_net(struct list_head *net_list) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index 5e0332c9d0d7..797886f10868 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -1094,8 +1094,8 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, goto out_encap; } - gn = net_generic(dev_net(dev), gtp_net_id); - list_add_rcu(&gtp->list, &gn->gtp_dev_list); + gn = net_generic(src_net, gtp_net_id); + list_add(&gtp->list, &gn->gtp_dev_list); dev->priv_destructor = gtp_destructor; netdev_dbg(dev, "registered new GTP interface\n"); @@ -1121,7 +1121,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head) hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid) pdp_context_delete(pctx); - list_del_rcu(&gtp->list); + list_del(&gtp->list); unregister_netdevice_queue(dev, head); } @@ -1689,16 +1689,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp; int i, j, bucket = cb->args[0], skip = cb->args[1]; struct net *net = sock_net(skb->sk); + struct net_device *dev; struct pdp_ctx *pctx; - struct gtp_net *gn; - - gn = net_generic(net, gtp_net_id); if (cb->args[4]) return 0; rcu_read_lock(); - list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { + for_each_netdev_rcu(net, dev) { + if (dev->rtnl_link_ops != &gtp_link_ops) + continue; + + gtp = netdev_priv(dev); + if (last_gtp && last_gtp != gtp) continue; else @@ -1883,23 +1886,23 @@ static int __net_init gtp_net_init(struct net *net) return 0; } -static void __net_exit gtp_net_exit(struct net *net) +static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list, + struct list_head *dev_to_kill) { - struct gtp_net *gn = net_generic(net, gtp_net_id); - struct gtp_dev *gtp; - LIST_HEAD(list); + struct net *net; - rtnl_lock(); - list_for_each_entry(gtp, &gn->gtp_dev_list, list) - gtp_dellink(gtp->dev, &list); + list_for_each_entry(net, net_list, exit_list) { + struct gtp_net *gn = net_generic(net, gtp_net_id); + struct gtp_dev *gtp, *gtp_next; - unregister_netdevice_many(&list); - rtnl_unlock(); + list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list) + gtp_dellink(gtp->dev, dev_to_kill); + } } static struct pernet_operations gtp_net_ops = { .init = gtp_net_init, - .exit = gtp_net_exit, + .exit_batch_rtnl = gtp_net_exit_batch_rtnl, .id = &gtp_net_id, .size = sizeof(struct gtp_net), }; diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index dd5919ec408b..e7fd72f57e36 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -156,7 +156,6 @@ struct hv_netvsc_packet { u8 cp_partial; /* partial copy into send buffer */ u8 rmsg_size; /* RNDIS header and PPI size */ - u8 rmsg_pgcnt; /* page count of RNDIS header and PPI */ u8 page_buf_cnt; u16 q_idx; @@ -891,6 +890,18 @@ struct nvsp_message { sizeof(struct nvsp_message)) #define NETVSC_MIN_IN_MSG_SIZE sizeof(struct vmpacket_descriptor) +/* Maximum # of contiguous data ranges that can make up a transmitted packet. + * Typically it's the max SKB fragments plus 2 for the rndis packet and the + * linear portion of the SKB. But if MAX_SKB_FRAGS is large, the value may + * need to be limited to MAX_PAGE_BUFFER_COUNT, which is the max # of entries + * in a GPA direct packet sent to netvsp over VMBus.
+ */ +#if MAX_SKB_FRAGS + 2 < MAX_PAGE_BUFFER_COUNT +#define MAX_DATA_RANGES (MAX_SKB_FRAGS + 2) +#else +#define MAX_DATA_RANGES MAX_PAGE_BUFFER_COUNT +#endif + /* Estimated requestor size: * out_ring_size/min_out_msg_size + in_ring_size/min_in_msg_size */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 3a834d4e1c84..ac2d706ef2d0 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -980,8 +980,7 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device, + pend_size; int i; u32 padding = 0; - u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt : - packet->page_buf_cnt; + u32 page_count = packet->cp_partial ? 1 : packet->page_buf_cnt; u32 remain; /* Add padding */ @@ -1082,6 +1081,42 @@ static int netvsc_dma_map(struct hv_device *hv_dev, return 0; } +/* Build an "array" of mpb entries describing the data to be transferred + * over VMBus. After the desc header fields, each "array" entry is variable + * size, and each entry starts after the end of the previous entry. The + * "offset" and "len" fields for each entry imply the size of the entry. + * + * The pfns are in HV_HYP_PAGE_SIZE, because all communication with Hyper-V + * uses that granularity, even if the system page size of the guest is larger. + * Each entry in the input "pb" array must describe a contiguous range of + * guest physical memory so that the pfns are sequential if the range crosses + * a page boundary. The offset field must be < HV_HYP_PAGE_SIZE. + */ +static inline void netvsc_build_mpb_array(struct hv_page_buffer *pb, + u32 page_buffer_count, + struct vmbus_packet_mpb_array *desc, + u32 *desc_size) +{ + struct hv_mpb_array *mpb_entry = &desc->range; + int i, j; + + for (i = 0; i < page_buffer_count; i++) { + u32 offset = pb[i].offset; + u32 len = pb[i].len; + + mpb_entry->offset = offset; + mpb_entry->len = len; + + for (j = 0; j < HVPFN_UP(offset + len); j++) + mpb_entry->pfn_array[j] = pb[i].pfn + j; + + mpb_entry = (struct hv_mpb_array *)&mpb_entry->pfn_array[j]; + } + + desc->rangecount = page_buffer_count; + *desc_size = (char *)mpb_entry - (char *)desc; +} + static inline int netvsc_send_pkt( struct hv_device *device, struct hv_netvsc_packet *packet, @@ -1124,8 +1159,11 @@ static inline int netvsc_send_pkt( packet->dma_range = NULL; if (packet->page_buf_cnt) { + struct vmbus_channel_packet_page_buffer desc; + u32 desc_size; + if (packet->cp_partial) - pb += packet->rmsg_pgcnt; + pb++; ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb); if (ret) { @@ -1133,11 +1171,12 @@ static inline int netvsc_send_pkt( goto exit; } - ret = vmbus_sendpacket_pagebuffer(out_channel, - pb, packet->page_buf_cnt, - &nvmsg, sizeof(nvmsg), - req_id); - + netvsc_build_mpb_array(pb, packet->page_buf_cnt, + (struct vmbus_packet_mpb_array *)&desc, + &desc_size); + ret = vmbus_sendpacket_mpb_desc(out_channel, + (struct vmbus_packet_mpb_array *)&desc, + desc_size, &nvmsg, sizeof(nvmsg), req_id); if (ret) netvsc_dma_unmap(ndev_ctx->device_ctx, packet); } else { @@ -1286,7 +1325,7 @@ int netvsc_send(struct net_device *ndev, packet->send_buf_index = section_index; if (packet->cp_partial) { - packet->page_buf_cnt -= packet->rmsg_pgcnt; + packet->page_buf_cnt--; packet->total_data_buflen = msd_len + packet->rmsg_size; } else { packet->page_buf_cnt = 0; diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 2fba5dfd9cd0..96a1767281f7 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -325,43 +325,10 @@ static 
u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, return txq; } -static u32 fill_pg_buf(unsigned long hvpfn, u32 offset, u32 len, - struct hv_page_buffer *pb) -{ - int j = 0; - - hvpfn += offset >> HV_HYP_PAGE_SHIFT; - offset = offset & ~HV_HYP_PAGE_MASK; - - while (len > 0) { - unsigned long bytes; - - bytes = HV_HYP_PAGE_SIZE - offset; - if (bytes > len) - bytes = len; - pb[j].pfn = hvpfn; - pb[j].offset = offset; - pb[j].len = bytes; - - offset += bytes; - len -= bytes; - - if (offset == HV_HYP_PAGE_SIZE && len) { - hvpfn++; - offset = 0; - j++; - } - } - - return j + 1; -} - static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, struct hv_netvsc_packet *packet, struct hv_page_buffer *pb) { - u32 slots_used = 0; - char *data = skb->data; int frags = skb_shinfo(skb)->nr_frags; int i; @@ -370,28 +337,27 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb, * 2. skb linear data * 3. skb fragment data */ - slots_used += fill_pg_buf(virt_to_hvpfn(hdr), - offset_in_hvpage(hdr), - len, - &pb[slots_used]); + pb[0].offset = offset_in_hvpage(hdr); + pb[0].len = len; + pb[0].pfn = virt_to_hvpfn(hdr); packet->rmsg_size = len; - packet->rmsg_pgcnt = slots_used; - slots_used += fill_pg_buf(virt_to_hvpfn(data), - offset_in_hvpage(data), - skb_headlen(skb), - &pb[slots_used]); + pb[1].offset = offset_in_hvpage(skb->data); + pb[1].len = skb_headlen(skb); + pb[1].pfn = virt_to_hvpfn(skb->data); for (i = 0; i < frags; i++) { skb_frag_t *frag = skb_shinfo(skb)->frags + i; + struct hv_page_buffer *cur_pb = &pb[i + 2]; + u64 pfn = page_to_hvpfn(skb_frag_page(frag)); + u32 offset = skb_frag_off(frag); - slots_used += fill_pg_buf(page_to_hvpfn(skb_frag_page(frag)), - skb_frag_off(frag), - skb_frag_size(frag), - &pb[slots_used]); + cur_pb->offset = offset_in_hvpage(offset); + cur_pb->len = skb_frag_size(frag); + cur_pb->pfn = pfn + (offset >> HV_HYP_PAGE_SHIFT); } - return slots_used; + return frags + 2; } static int count_skb_frag_slots(struct sk_buff *skb) @@ -482,7 +448,7 @@ static int netvsc_xmit(struct sk_buff *skb, struct net_device *net, bool xdp_tx) struct net_device *vf_netdev; u32 rndis_msg_size; u32 hash; - struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT]; + struct hv_page_buffer pb[MAX_DATA_RANGES]; /* If VF is present and up then redirect packets to it. * Skip the VF if it is marked down or has no carrier. 
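With this change, init_page_array() above emits exactly one hv_page_buffer per data range (RNDIS header, linear data, then each fragment), and netvsc_build_mpb_array() in netvsc.c expands every range into a variable-sized entry carrying one PFN per Hyper-V page the range touches, each entry starting right after the previous one. A standalone sketch of that packing, where struct range and struct entry are simplified stand-ins for hv_page_buffer and hv_mpb_array:

#include <stdint.h>
#include <stdio.h>

#define HYP_PAGE_SHIFT 12
#define HYP_PAGE_SIZE  (1u << HYP_PAGE_SHIFT)

struct range { uint32_t offset, len; uint64_t pfn; };    /* like hv_page_buffer */
struct entry { uint32_t offset, len; uint64_t pfn[]; };  /* like hv_mpb_array   */

/* Pack each range as a header plus one PFN per page it covers; the next
 * entry starts right after the previous one. Returns the size in bytes.
 */
static size_t build_mpb_array(const struct range *r, int n, void *buf)
{
        char *p = buf;
        uint32_t j, pages;
        int i;

        for (i = 0; i < n; i++) {
                struct entry *e = (struct entry *)p;

                e->offset = r[i].offset;
                e->len = r[i].len;
                pages = (r[i].offset + r[i].len + HYP_PAGE_SIZE - 1) /
                        HYP_PAGE_SIZE;
                for (j = 0; j < pages; j++)
                        e->pfn[j] = r[i].pfn + j;

                p += sizeof(*e) + pages * sizeof(e->pfn[0]);
        }
        return (size_t)(p - (char *)buf);
}

int main(void)
{
        /* 0x500 bytes starting 0xe00 into pfn 100 span two pages, so that
         * entry carries pfns 100 and 101; the second range fits in one page.
         */
        struct range r[2] = { { 0xe00, 0x500, 100 }, { 0, 0x100, 200 } };
        uint64_t storage[32];

        printf("%zu descriptor bytes\n", build_mpb_array(r, 2, storage));
        return 0;
}

This prints 40: two 8-byte headers plus three 8-byte PFNs, which mirrors how the driver computes desc_size from the entries it just built before handing the descriptor to vmbus_sendpacket_mpb_desc().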
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index eea777ec2541..bb656ea09773 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -225,8 +225,7 @@ static int rndis_filter_send_request(struct rndis_device *dev, struct rndis_request *req) { struct hv_netvsc_packet *packet; - struct hv_page_buffer page_buf[2]; - struct hv_page_buffer *pb = page_buf; + struct hv_page_buffer pb; int ret; /* Setup the packet to send it */ @@ -235,27 +234,14 @@ static int rndis_filter_send_request(struct rndis_device *dev, packet->total_data_buflen = req->request_msg.msg_len; packet->page_buf_cnt = 1; - pb[0].pfn = virt_to_phys(&req->request_msg) >> - HV_HYP_PAGE_SHIFT; - pb[0].len = req->request_msg.msg_len; - pb[0].offset = offset_in_hvpage(&req->request_msg); - - /* Add one page_buf when request_msg crossing page boundary */ - if (pb[0].offset + pb[0].len > HV_HYP_PAGE_SIZE) { - packet->page_buf_cnt++; - pb[0].len = HV_HYP_PAGE_SIZE - - pb[0].offset; - pb[1].pfn = virt_to_phys((void *)&req->request_msg - + pb[0].len) >> HV_HYP_PAGE_SHIFT; - pb[1].offset = 0; - pb[1].len = req->request_msg.msg_len - - pb[0].len; - } + pb.pfn = virt_to_phys(&req->request_msg) >> HV_HYP_PAGE_SHIFT; + pb.len = req->request_msg.msg_len; + pb.offset = offset_in_hvpage(&req->request_msg); trace_rndis_send(dev->ndev, 0, &req->request_msg); rcu_read_lock_bh(); - ret = netvsc_send(dev->ndev, packet, NULL, pb, NULL, false); + ret = netvsc_send(dev->ndev, packet, NULL, &pb, NULL, false); rcu_read_unlock_bh(); return ret; diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index c2201e0adc46..463be34a4ca4 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -1446,8 +1446,7 @@ static u8 mcps_data_request( command.pdata.data_req.src_addr_mode = src_addr_mode; command.pdata.data_req.dst.mode = dst_address_mode; if (dst_address_mode != MAC_MODE_NO_ADDR) { - command.pdata.data_req.dst.pan_id[0] = LS_BYTE(dst_pan_id); - command.pdata.data_req.dst.pan_id[1] = MS_BYTE(dst_pan_id); + put_unaligned_le16(dst_pan_id, command.pdata.data_req.dst.pan_id); if (dst_address_mode == MAC_MODE_SHORT_ADDR) { command.pdata.data_req.dst.address[0] = LS_BYTE( dst_addr->short_address @@ -1795,12 +1794,12 @@ static int ca8210_skb_rx( } hdr.source.mode = data_ind[0]; dev_dbg(&priv->spi->dev, "srcAddrMode: %#03x\n", hdr.source.mode); - hdr.source.pan_id = *(u16 *)&data_ind[1]; + hdr.source.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[1])); dev_dbg(&priv->spi->dev, "srcPanId: %#06x\n", hdr.source.pan_id); memcpy(&hdr.source.extended_addr, &data_ind[3], 8); hdr.dest.mode = data_ind[11]; dev_dbg(&priv->spi->dev, "dstAddrMode: %#03x\n", hdr.dest.mode); - hdr.dest.pan_id = *(u16 *)&data_ind[12]; + hdr.dest.pan_id = cpu_to_le16(get_unaligned_le16(&data_ind[12])); dev_dbg(&priv->spi->dev, "dstPanId: %#06x\n", hdr.dest.pan_id); memcpy(&hdr.dest.extended_addr, &data_ind[14], 8); @@ -1927,7 +1926,7 @@ static int ca8210_skb_tx( status = mcps_data_request( header.source.mode, header.dest.mode, - header.dest.pan_id, + le16_to_cpu(header.dest.pan_id), (union macaddr *)&header.dest.extended_addr, skb->len - mac_len, &skb->data[mac_len], @@ -3078,7 +3077,11 @@ static int ca8210_probe(struct spi_device *spi_device) spi_set_drvdata(priv->spi, priv); if (IS_ENABLED(CONFIG_IEEE802154_CA8210_DEBUGFS)) { cascoda_api_upstream = ca8210_test_int_driver_write; - ca8210_test_interface_init(priv); + ret = ca8210_test_interface_init(priv); + if (ret) { + 
dev_crit(&spi_device->dev, "ca8210_test_interface_init failed\n"); + goto error; + } } else { cascoda_api_upstream = NULL; } diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 1d49771d07f4..eea81a733405 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -2,6 +2,9 @@ /* Copyright (c) 2014 Mahesh Bandewar */ +#include +#include + #include "ipvlan.h" static u32 ipvlan_jhash_secret __read_mostly; @@ -413,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h, static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb) { - const struct iphdr *ip4h = ip_hdr(skb); struct net_device *dev = skb->dev; struct net *net = dev_net(dev); - struct rtable *rt; int err, ret = NET_XMIT_DROP; + const struct iphdr *ip4h; + struct rtable *rt; struct flowi4 fl4 = { .flowi4_oif = dev->ifindex, - .flowi4_tos = RT_TOS(ip4h->tos), .flowi4_flags = FLOWI_FLAG_ANYSRC, .flowi4_mark = skb->mark, - .daddr = ip4h->daddr, - .saddr = ip4h->saddr, }; + if (!pskb_network_may_pull(skb, sizeof(struct iphdr))) + goto err; + + ip4h = ip_hdr(skb); + fl4.daddr = ip4h->daddr; + fl4.saddr = ip4h->saddr; + fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)); + rt = ip_route_output_flow(net, &fl4, NULL); if (IS_ERR(rt)) goto err; @@ -485,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) struct net_device *dev = skb->dev; int err, ret = NET_XMIT_DROP; + if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) { + DEV_STATS_INC(dev, tx_errors); + kfree_skb(skb); + return ret; + } + err = ipvlan_route_v6_outbound(dev, skb); if (unlikely(err)) { DEV_STATS_INC(dev, tx_errors); diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index a4969f1a7e36..14db5b7c92d6 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -246,8 +246,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } +static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb) +{ + kfree_skb(skb); + return 0; +} + +static int blackhole_neigh_construct(struct net_device *dev, + struct neighbour *n) +{ + n->output = blackhole_neigh_output; + return 0; +} + static const struct net_device_ops blackhole_netdev_ops = { .ndo_start_xmit = blackhole_netdev_xmit, + .ndo_neigh_construct = blackhole_neigh_construct, }; /* This is a dst-dummy device used specifically for invalidated diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index 7635a8b3c35c..17619d011689 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -543,6 +543,7 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev, struct mctp_i2c_hdr *hdr; struct mctp_hdr *mhdr; u8 lldst, llsrc; + int rc; if (len > MCTP_I2C_MAXMTU) return -EMSGSIZE; @@ -553,6 +554,10 @@ static int mctp_i2c_header_create(struct sk_buff *skb, struct net_device *dev, lldst = *((u8 *)daddr); llsrc = *((u8 *)saddr); + rc = skb_cow_head(skb, sizeof(struct mctp_i2c_hdr)); + if (rc) + return rc; + skb_push(skb, sizeof(struct mctp_i2c_hdr)); skb_reset_mac_header(skb); hdr = (void *)skb_mac_header(skb); diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c index b782c35c4ac1..8ada397bc357 100644 --- a/drivers/net/mdio/fwnode_mdio.c +++ b/drivers/net/mdio/fwnode_mdio.c @@ -38,6 +38,7 @@ fwnode_find_pse_control(struct fwnode_handle *fwnode) static struct mii_timestamper * fwnode_find_mii_timestamper(struct fwnode_handle *fwnode) { + struct mii_timestamper 
*mii_ts; struct of_phandle_args arg; int err; @@ -51,10 +52,16 @@ fwnode_find_mii_timestamper(struct fwnode_handle *fwnode) else if (err) return ERR_PTR(err); - if (arg.args_count != 1) - return ERR_PTR(-EINVAL); + if (arg.args_count != 1) { + mii_ts = ERR_PTR(-EINVAL); + goto put_node; + } - return register_mii_timestamper(arg.np, arg.args[0]); + mii_ts = register_mii_timestamper(arg.np, arg.args[0]); + +put_node: + of_node_put(arg.np); + return mii_ts; } int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio, diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c index 4eba5a91075c..da5dc854b6ca 100644 --- a/drivers/net/mdio/mdio-ipq4019.c +++ b/drivers/net/mdio/mdio-ipq4019.c @@ -231,8 +231,11 @@ static int ipq4019_mdio_probe(struct platform_device *pdev) /* The platform resource is provided on the chipset IPQ5018 */ /* This resource is optional */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res) + if (res) { priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->eth_ldo_rdy)) + return PTR_ERR(priv->eth_ldo_rdy); + } bus->name = "ipq4019_mdio"; bus->read = ipq4019_mdio_read; diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c index aa77af4a68df..bc44462c70e2 100644 --- a/drivers/net/netdevsim/health.c +++ b/drivers/net/netdevsim/health.c @@ -203,6 +203,8 @@ static ssize_t nsim_dev_health_break_write(struct file *file, char *break_msg; int err; + if (count == 0 || count > PAGE_SIZE) + return -EINVAL; break_msg = memdup_user_nul(data, count); if (IS_ERR(break_msg)) return PTR_ERR(break_msg); diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index 386336a38f34..ec1fc1d3ea36 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -39,10 +39,14 @@ static ssize_t nsim_dbg_netdev_ops_read(struct file *filp, if (!sap->used) continue; - p += scnprintf(p, bufsize - (p - buf), - "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n", - i, (sap->rx ? 'r' : 't'), sap->ipaddr[0], - sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]); + if (sap->xs->props.family == AF_INET6) + p += scnprintf(p, bufsize - (p - buf), + "sa[%i] %cx ipaddr=%pI6c\n", + i, (sap->rx ? 'r' : 't'), &sap->ipaddr); + else + p += scnprintf(p, bufsize - (p - buf), + "sa[%i] %cx ipaddr=%pI4\n", + i, (sap->rx ? 
'r' : 't'), &sap->ipaddr[3]); p += scnprintf(p, bufsize - (p - buf), "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n", i, be32_to_cpu(sap->xs->id.spi), @@ -171,14 +175,13 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs) return ret; } - if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) { + if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) sa.rx = true; - if (xs->props.family == AF_INET6) - memcpy(sa.ipaddr, &xs->id.daddr.a6, 16); - else - memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4); - } + if (xs->props.family == AF_INET6) + memcpy(sa.ipaddr, &xs->id.daddr.a6, 16); + else + memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4); /* the preparations worked, so save the info */ memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa)); diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h index 7d8ed8d8df5c..02e3518e9a7e 100644 --- a/drivers/net/netdevsim/netdevsim.h +++ b/drivers/net/netdevsim/netdevsim.h @@ -98,6 +98,7 @@ struct netdevsim { u32 sleep; u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS]; u32 (*ports)[NSIM_UDP_TUNNEL_N_PORTS]; + struct dentry *ddir; struct debugfs_u32_array dfs_ports[2]; } udp_ports; diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c index 02dc3123eb6c..640b4983a9a0 100644 --- a/drivers/net/netdevsim/udp_tunnels.c +++ b/drivers/net/netdevsim/udp_tunnels.c @@ -112,9 +112,11 @@ nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data, struct net_device *dev = file->private_data; struct netdevsim *ns = netdev_priv(dev); - memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports)); rtnl_lock(); - udp_tunnel_nic_reset_ntf(dev); + if (dev->reg_state == NETREG_REGISTERED) { + memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports)); + udp_tunnel_nic_reset_ntf(dev); + } rtnl_unlock(); return count; @@ -144,23 +146,23 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev, else ns->udp_ports.ports = nsim_dev->udp_ports.__ports; - debugfs_create_u32("udp_ports_inject_error", 0600, - ns->nsim_dev_port->ddir, + ns->udp_ports.ddir = debugfs_create_dir("udp_ports", + ns->nsim_dev_port->ddir); + + debugfs_create_u32("inject_error", 0600, ns->udp_ports.ddir, &ns->udp_ports.inject_error); ns->udp_ports.dfs_ports[0].array = ns->udp_ports.ports[0]; ns->udp_ports.dfs_ports[0].n_elements = NSIM_UDP_TUNNEL_N_PORTS; - debugfs_create_u32_array("udp_ports_table0", 0400, - ns->nsim_dev_port->ddir, + debugfs_create_u32_array("table0", 0400, ns->udp_ports.ddir, &ns->udp_ports.dfs_ports[0]); ns->udp_ports.dfs_ports[1].array = ns->udp_ports.ports[1]; ns->udp_ports.dfs_ports[1].n_elements = NSIM_UDP_TUNNEL_N_PORTS; - debugfs_create_u32_array("udp_ports_table1", 0400, - ns->nsim_dev_port->ddir, + debugfs_create_u32_array("table1", 0400, ns->udp_ports.ddir, &ns->udp_ports.dfs_ports[1]); - debugfs_create_file("udp_ports_reset", 0200, ns->nsim_dev_port->ddir, + debugfs_create_file("reset", 0200, ns->udp_ports.ddir, dev, &nsim_udp_tunnels_info_reset_fops); /* Note: it's not normal to allocate the info struct like this! 
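The netdevsim ipsec hunks above keep an IPv4 destination in the last 32-bit word of the same 16-byte buffer that holds an IPv6 address, and pick the debugfs output format from the xfrm family. A small userspace sketch of that convention follows, using inet_ntop() in place of the kernel's %pI4/%pI6c specifiers; the struct and field names here are illustrative stand-ins.

/* Hedged sketch, not netdevsim code. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct sa_entry {              /* simplified stand-in for struct nsim_sa   */
	int      family;       /* AF_INET or AF_INET6                      */
	uint32_t ipaddr[4];    /* v6 fills all 16 bytes, v4 only ipaddr[3] */
};

static void print_sa(const struct sa_entry *sa)
{
	char buf[INET6_ADDRSTRLEN];

	if (sa->family == AF_INET6)
		inet_ntop(AF_INET6, sa->ipaddr, buf, sizeof(buf));
	else
		inet_ntop(AF_INET, &sa->ipaddr[3], buf, sizeof(buf));
	printf("sa ipaddr=%s\n", buf);
}

int main(void)
{
	struct sa_entry v4 = { .family = AF_INET };
	struct sa_entry v6 = { .family = AF_INET6 };

	inet_pton(AF_INET, "192.0.2.1", &v4.ipaddr[3]);
	inet_pton(AF_INET6, "2001:db8::1", v6.ipaddr);
	print_sa(&v4);
	print_sa(&v6);
	return 0;
}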
@@ -196,6 +198,9 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev, void nsim_udp_tunnels_info_destroy(struct net_device *dev) { + struct netdevsim *ns = netdev_priv(dev); + + debugfs_remove_recursive(ns->udp_ports.ddir); kfree(dev->udp_tunnel_nic_info); dev->udp_tunnel_nic_info = NULL; } diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index 029875a59ff8..718bba014b57 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -937,6 +937,8 @@ static int nxp_c45_soft_reset(struct phy_device *phydev) if (ret) return ret; + usleep_range(2000, 2050); + return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL, ret, !(ret & DEVICE_CONTROL_RESET), 20000, diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c index f550576eb9da..6f9d8da76c4d 100644 --- a/drivers/net/phy/phy_led_triggers.c +++ b/drivers/net/phy/phy_led_triggers.c @@ -91,9 +91,8 @@ int phy_led_triggers_register(struct phy_device *phy) if (!phy->phy_num_led_triggers) return 0; - phy->led_link_trigger = devm_kzalloc(&phy->mdio.dev, - sizeof(*phy->led_link_trigger), - GFP_KERNEL); + phy->led_link_trigger = kzalloc(sizeof(*phy->led_link_trigger), + GFP_KERNEL); if (!phy->led_link_trigger) { err = -ENOMEM; goto out_clear; @@ -103,10 +102,9 @@ int phy_led_triggers_register(struct phy_device *phy) if (err) goto out_free_link; - phy->phy_led_triggers = devm_kcalloc(&phy->mdio.dev, - phy->phy_num_led_triggers, - sizeof(struct phy_led_trigger), - GFP_KERNEL); + phy->phy_led_triggers = kcalloc(phy->phy_num_led_triggers, + sizeof(struct phy_led_trigger), + GFP_KERNEL); if (!phy->phy_led_triggers) { err = -ENOMEM; goto out_unreg_link; @@ -127,11 +125,11 @@ int phy_led_triggers_register(struct phy_device *phy) out_unreg: while (i--) phy_led_trigger_unregister(&phy->phy_led_triggers[i]); - devm_kfree(&phy->mdio.dev, phy->phy_led_triggers); + kfree(phy->phy_led_triggers); out_unreg_link: phy_led_trigger_unregister(phy->led_link_trigger); out_free_link: - devm_kfree(&phy->mdio.dev, phy->led_link_trigger); + kfree(phy->led_link_trigger); phy->led_link_trigger = NULL; out_clear: phy->phy_num_led_triggers = 0; @@ -145,8 +143,13 @@ void phy_led_triggers_unregister(struct phy_device *phy) for (i = 0; i < phy->phy_num_led_triggers; i++) phy_led_trigger_unregister(&phy->phy_led_triggers[i]); + kfree(phy->phy_led_triggers); + phy->phy_led_triggers = NULL; - if (phy->led_link_trigger) + if (phy->led_link_trigger) { phy_led_trigger_unregister(phy->led_link_trigger); + kfree(phy->led_link_trigger); + phy->led_link_trigger = NULL; + } } EXPORT_SYMBOL_GPL(phy_led_triggers_unregister); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index fc58e4afb38d..3069a7df25d3 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1566,7 +1566,7 @@ bool phylink_expects_phy(struct phylink *pl) { if (pl->cfg_link_an_mode == MLO_AN_FIXED || (pl->cfg_link_an_mode == MLO_AN_INBAND && - phy_interface_mode_is_8023z(pl->link_config.interface))) + phy_interface_mode_is_8023z(pl->link_interface))) return false; return true; } diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 06dce78d7b0c..0666a39dc485 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -386,7 +386,8 @@ static void sfp_quirk_ubnt_uf_instant(const struct sfp_eeprom_id *id, static const struct sfp_quirk sfp_quirks[] = { // Alcatel Lucent G-010S-P can operate at 2500base-X, but incorrectly // report 2500MBd NRZ in their EEPROM - 
SFP_QUIRK_M("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex), + SFP_QUIRK("ALCATELLUCENT", "G010SP", sfp_quirk_2500basex, + sfp_fixup_ignore_tx_fault), // Alcatel Lucent G-010S-A can operate at 2500base-X, but report 3.2GBd // NRZ in their EEPROM diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index de14e89619c5..67d9efb05443 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -72,6 +72,17 @@ #define PPP_PROTO_LEN 2 #define PPP_LCP_HDRLEN 4 +/* The filter instructions generated by libpcap are constructed + * assuming a four-byte PPP header on each packet, where the last + * 2 bytes are the protocol field defined in the RFC and the first + * byte of the first 2 bytes indicates the direction. + * The second byte is currently unused, but we still need to initialize + * it to prevent crafted BPF programs from reading them which would + * cause reading of uninitialized data. + */ +#define PPP_FILTER_OUTBOUND_TAG 0x0100 +#define PPP_FILTER_INBOUND_TAG 0x0000 + /* * An instance of /dev/ppp can be associated with either a ppp * interface unit or a ppp channel. In both cases, file->private_data @@ -1762,10 +1773,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) if (proto < 0x8000) { #ifdef CONFIG_PPP_FILTER - /* check if we should pass this packet */ - /* the filter instructions are constructed assuming - a four-byte PPP header on each packet */ - *(u8 *)skb_push(skb, 2) = 1; + /* check if the packet passes the pass and active filters. + * See comment for PPP_FILTER_OUTBOUND_TAG above. + */ + *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG); if (ppp->pass_filter && bpf_prog_run(ppp->pass_filter, skb) == 0) { if (ppp->debug & 1) @@ -2482,14 +2493,13 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) /* network protocol frame - give it to the kernel */ #ifdef CONFIG_PPP_FILTER - /* check if the packet passes the pass and active filters */ - /* the filter instructions are constructed assuming - a four-byte PPP header on each packet */ if (ppp->pass_filter || ppp->active_filter) { if (skb_unclone(skb, GFP_ATOMIC)) goto err; - - *(u8 *)skb_push(skb, 2) = 0; + /* Check if the packet passes the pass and active filters. + * See comment for PPP_FILTER_INBOUND_TAG above. 
+ */ + *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG); if (ppp->pass_filter && bpf_prog_run(ppp->pass_filter, skb) == 0) { if (ppp->debug & 1) diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c index 94ef6f9ca510..fb533c43deee 100644 --- a/drivers/net/ppp/ppp_synctty.c +++ b/drivers/net/ppp/ppp_synctty.c @@ -515,6 +515,11 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb) unsigned char *data; int islcp; + /* Ensure we can safely access protocol field and LCP code */ + if (!pskb_may_pull(skb, 3)) { + kfree_skb(skb); + return NULL; + } data = skb->data; proto = get_unaligned_be16(data); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 293eaf6b3ec9..c2327fa10747 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -984,7 +984,8 @@ static void team_port_disable(struct team *team, #define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \ - NETIF_F_HIGHDMA | NETIF_F_LRO) + NETIF_F_HIGHDMA | NETIF_F_LRO | \ + NETIF_F_GSO_ENCAP_ALL) #define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \ NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE) @@ -1170,6 +1171,13 @@ static int team_port_add(struct team *team, struct net_device *port_dev, return -EBUSY; } + if (netdev_has_upper_dev(port_dev, dev)) { + NL_SET_ERR_MSG(extack, "Device is already a lower device of the team interface"); + netdev_err(dev, "Device %s is already a lower device of the team interface\n", + portname); + return -EBUSY; + } + if (port_dev->features & NETIF_F_VLAN_CHALLENGED && vlan_uses_dev(dev)) { NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up"); @@ -2662,7 +2670,9 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) ctx.data.u32_val = nla_get_u32(attr_data); break; case TEAM_OPTION_TYPE_STRING: - if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) { + if (nla_len(attr_data) > TEAM_STRING_MAX_LEN || + !memchr(nla_data(attr_data), '\0', + nla_len(attr_data))) { err = -EINVAL; goto team_put; } diff --git a/drivers/net/tun.c b/drivers/net/tun.c index ea98d93138c1..03478ae3ff24 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -580,7 +580,7 @@ static inline bool tun_not_capable(struct tun_struct *tun) struct net *net = dev_net(tun->dev); return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || - (gid_valid(tun->group) && !in_egroup_p(tun->group))) && + (gid_valid(tun->group) && !in_egroup_p(tun->group))) && !ns_capable(net->user_ns, CAP_NET_ADMIN); } diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c index 46af78caf457..0bfa37c14059 100644 --- a/drivers/net/usb/gl620a.c +++ b/drivers/net/usb/gl620a.c @@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf) { dev->hard_mtu = GL_RCV_BUF_SIZE; dev->net->hard_header_len += 4; - dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in); - dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out); - return 0; + return usbnet_get_endpoints(dev, intf); } static const struct driver_info genelink_info = { diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 366e83ed0a97..feff1265cad6 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1657,13 +1657,13 @@ static int lan78xx_set_wol(struct net_device *netdev, struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]); int ret; + if (wol->wolopts & ~WAKE_ALL) + return -EINVAL; + ret = usb_autopm_get_interface(dev->intf); if (ret < 0) return ret; - 
if (wol->wolopts & ~WAKE_ALL) - return -EINVAL; - pdata->wol = wol->wolopts; device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts); @@ -2387,6 +2387,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) if (dev->chipid == ID_REV_CHIP_ID_7801_) { if (phy_is_pseudo_fixed_link(phydev)) { fixed_phy_unregister(phydev); + phy_device_free(phydev); } else { phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0); @@ -4246,8 +4247,10 @@ static void lan78xx_disconnect(struct usb_interface *intf) phy_disconnect(net->phydev); - if (phy_is_pseudo_fixed_link(phydev)) + if (phy_is_pseudo_fixed_link(phydev)) { fixed_phy_unregister(phydev); + phy_device_free(phydev); + } usb_scuttle_anchored_urbs(&dev->deferred); @@ -4417,29 +4420,30 @@ static int lan78xx_probe(struct usb_interface *intf, period = ep_intr->desc.bInterval; maxp = usb_maxpacket(dev->udev, dev->pipe_intr); - buf = kmalloc(maxp, GFP_KERNEL); - if (!buf) { - ret = -ENOMEM; - goto out5; - } dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL); if (!dev->urb_intr) { ret = -ENOMEM; - goto out6; - } else { - usb_fill_int_urb(dev->urb_intr, dev->udev, - dev->pipe_intr, buf, maxp, - intr_complete, dev, period); - dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; + goto out5; } + buf = kmalloc(maxp, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto free_urbs; + } + + usb_fill_int_urb(dev->urb_intr, dev->udev, + dev->pipe_intr, buf, maxp, + intr_complete, dev, period); + dev->urb_intr->transfer_flags |= URB_FREE_BUFFER; + dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out); /* Reject broken descriptors. */ if (dev->maxpacket == 0) { ret = -ENODEV; - goto out6; + goto free_urbs; } /* driver requires remote-wakeup capability during autosuspend. */ @@ -4447,7 +4451,7 @@ static int lan78xx_probe(struct usb_interface *intf, ret = lan78xx_phy_init(dev); if (ret < 0) - goto out7; + goto free_urbs; ret = register_netdev(netdev); if (ret != 0) { @@ -4469,10 +4473,8 @@ static int lan78xx_probe(struct usb_interface *intf, out8: phy_disconnect(netdev->phydev); -out7: +free_urbs: usb_free_urb(dev->urb_intr); -out6: - kfree(buf); out5: lan78xx_unbind(dev, intf); out4: diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index fe9abc4ea3af..b4c0413c6522 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1083,6 +1083,7 @@ static const struct usb_device_id products[] = { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), .driver_info = (unsigned long)&qmi_wwan_info, }, + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0122)}, /* Quectel RG650V */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */ @@ -1371,6 +1372,11 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10b0, 0)}, /* Telit FE990B */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)}, /* Telit FE910C04 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)}, /* Telit FE910C04 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 0)}, /* Telit FE910C04 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x10d0, 0)}, /* Telit FN990B */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ diff --git 
a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 958a02b19554..c2b715541989 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -9870,6 +9870,7 @@ static const struct usb_device_id rtl8152_table[] = { { USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3069) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x3082) }, + { USB_DEVICE(VENDOR_ID_LENOVO, 0x3098) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x7205) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) }, { USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) }, @@ -9879,6 +9880,7 @@ static const struct usb_device_id rtl8152_table[] = { { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) }, { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) }, { USB_DEVICE(VENDOR_ID_DLINK, 0xb301) }, + { USB_DEVICE(VENDOR_ID_DELL, 0xb097) }, { USB_DEVICE(VENDOR_ID_ASUS, 0x1976) }, {} }; diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 01a3b2417a54..ddff6f19ff98 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -71,6 +71,14 @@ #define MSR_SPEED (1<<3) #define MSR_LINK (1<<2) +/* USB endpoints */ +enum rtl8150_usb_ep { + RTL8150_USB_EP_CONTROL = 0, + RTL8150_USB_EP_BULK_IN = 1, + RTL8150_USB_EP_BULK_OUT = 2, + RTL8150_USB_EP_INT_IN = 3, +}; + /* Interrupt pipe data */ #define INT_TSR 0x00 #define INT_RSR 0x01 @@ -867,6 +875,13 @@ static int rtl8150_probe(struct usb_interface *intf, struct usb_device *udev = interface_to_usbdev(intf); rtl8150_t *dev; struct net_device *netdev; + static const u8 bulk_ep_addr[] = { + RTL8150_USB_EP_BULK_IN | USB_DIR_IN, + RTL8150_USB_EP_BULK_OUT | USB_DIR_OUT, + 0}; + static const u8 int_ep_addr[] = { + RTL8150_USB_EP_INT_IN | USB_DIR_IN, + 0}; netdev = alloc_etherdev(sizeof(rtl8150_t)); if (!netdev) @@ -880,6 +895,13 @@ static int rtl8150_probe(struct usb_interface *intf, return -ENOMEM; } + /* Verify that all required endpoints are present */ + if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) || + !usb_check_int_endpoints(intf, int_ep_addr)) { + dev_err(&intf->dev, "couldn't find required endpoints\n"); + goto out; + } + tasklet_setup(&dev->tl, rx_fixup); spin_lock_init(&dev->rx_pool_lock); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ae1282487b02..51d93422d09c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -178,6 +178,17 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress) } EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr); +static bool usbnet_needs_usb_name_format(struct usbnet *dev, struct net_device *net) +{ + /* Point to point devices which don't have a real MAC address + * (or report a fake local one) have historically used the usb%d + * naming. Preserve this.. 
+ */ + return (dev->driver_info->flags & FLAG_POINTTOPOINT) != 0 && + (is_zero_ether_addr(net->dev_addr) || + is_local_ether_addr(net->dev_addr)); +} + static void intr_complete (struct urb *urb) { struct usbnet *dev = urb->context; @@ -519,7 +530,8 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) netif_device_present (dev->net) && test_bit(EVENT_DEV_OPEN, &dev->flags) && !test_bit (EVENT_RX_HALT, &dev->flags) && - !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) { + !test_bit (EVENT_DEV_ASLEEP, &dev->flags) && + !usbnet_going_away(dev)) { switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) { case -EPIPE: usbnet_defer_kevent (dev, EVENT_RX_HALT); @@ -540,8 +552,7 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags) tasklet_schedule (&dev->bh); break; case 0: - if (!usbnet_going_away(dev)) - __usbnet_queue_skb(&dev->rxq, skb, rx_start); + __usbnet_queue_skb(&dev->rxq, skb, rx_start); } } else { netif_dbg(dev, ifdown, dev->net, "rx: stopped\n"); @@ -1761,13 +1772,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) if (status < 0) goto out1; - // heuristic: "usb%d" for links we know are two-host, - // else "eth%d" when there's reasonable doubt. userspace - // can rename the link if it knows better. + /* heuristic: rename to "eth%d" if we are not sure this link + * is two-host (these links keep "usb%d") + */ if ((dev->driver_info->flags & FLAG_ETHER) != 0 && - ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 || - /* somebody touched it*/ - !is_zero_ether_addr(net->dev_addr))) + !usbnet_needs_usb_name_format(dev, net)) strscpy(net->name, "eth%d", sizeof(net->name)); /* WLAN devices should always be named "wlan%d" */ if ((dev->driver_info->flags & FLAG_WLAN) != 0) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 8dcd3b6e143b..e1e7df00e85c 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -342,7 +342,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) { if (!use_napi) - dev_lstats_add(dev, length); + dev_sw_netstats_tx_add(dev, 1, length); } else { drop: atomic64_inc(&priv->dropped); @@ -357,14 +357,6 @@ drop: return ret; } -static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes) -{ - struct veth_priv *priv = netdev_priv(dev); - - dev_lstats_read(dev, packets, bytes); - return atomic64_read(&priv->dropped); -} - static void veth_stats_rx(struct veth_stats *result, struct net_device *dev) { struct veth_priv *priv = netdev_priv(dev); @@ -402,24 +394,24 @@ static void veth_get_stats64(struct net_device *dev, struct veth_priv *priv = netdev_priv(dev); struct net_device *peer; struct veth_stats rx; - u64 packets, bytes; - tot->tx_dropped = veth_stats_tx(dev, &packets, &bytes); - tot->tx_bytes = bytes; - tot->tx_packets = packets; + tot->tx_dropped = atomic64_read(&priv->dropped); + dev_fetch_sw_netstats(tot, dev->tstats); veth_stats_rx(&rx, dev); tot->tx_dropped += rx.xdp_tx_err; tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err; - tot->rx_bytes = rx.xdp_bytes; - tot->rx_packets = rx.xdp_packets; + tot->rx_bytes += rx.xdp_bytes; + tot->rx_packets += rx.xdp_packets; rcu_read_lock(); peer = rcu_dereference(priv->peer); if (peer) { - veth_stats_tx(peer, &packets, &bytes); - tot->rx_bytes += bytes; - tot->rx_packets += packets; + struct rtnl_link_stats64 tot_peer = {}; + + dev_fetch_sw_netstats(&tot_peer, peer->tstats); + tot->rx_bytes += tot_peer.tx_bytes; + 
tot->rx_packets += tot_peer.tx_packets; veth_stats_rx(&rx, peer); tot->tx_dropped += rx.peer_tq_xdp_xmit_err; @@ -1381,25 +1373,12 @@ static void veth_free_queues(struct net_device *dev) static int veth_dev_init(struct net_device *dev) { - int err; - - dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); - if (!dev->lstats) - return -ENOMEM; - - err = veth_alloc_queues(dev); - if (err) { - free_percpu(dev->lstats); - return err; - } - - return 0; + return veth_alloc_queues(dev); } static void veth_dev_free(struct net_device *dev) { veth_free_queues(dev); - free_percpu(dev->lstats); } #ifdef CONFIG_NET_POLL_CONTROLLER @@ -1625,6 +1604,7 @@ static void veth_setup(struct net_device *dev) NETIF_F_HW_VLAN_STAG_RX); dev->needs_free_netdev = true; dev->priv_destructor = veth_dev_free; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev->max_mtu = ETH_MAX_MTU; dev->hw_features = VETH_FEATURES; diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 208df4d41939..c8a1009d659e 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -121,22 +121,12 @@ struct net_vrf { int ifindex; }; -struct pcpu_dstats { - u64 tx_pkts; - u64 tx_bytes; - u64 tx_drps; - u64 rx_pkts; - u64 rx_bytes; - u64 rx_drps; - struct u64_stats_sync syncp; -}; - static void vrf_rx_stats(struct net_device *dev, int len) { struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); u64_stats_update_begin(&dstats->syncp); - dstats->rx_pkts++; + dstats->rx_packets++; dstats->rx_bytes += len; u64_stats_update_end(&dstats->syncp); } @@ -161,10 +151,10 @@ static void vrf_get_stats64(struct net_device *dev, do { start = u64_stats_fetch_begin_irq(&dstats->syncp); tbytes = dstats->tx_bytes; - tpkts = dstats->tx_pkts; - tdrops = dstats->tx_drps; + tpkts = dstats->tx_packets; + tdrops = dstats->tx_drops; rbytes = dstats->rx_bytes; - rpkts = dstats->rx_pkts; + rpkts = dstats->rx_packets; } while (u64_stats_fetch_retry_irq(&dstats->syncp, start)); stats->tx_bytes += tbytes; stats->tx_packets += tpkts; @@ -421,7 +411,7 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) vrf_rx_stats(dev, len); else - this_cpu_inc(dev->dstats->rx_drps); + this_cpu_inc(dev->dstats->rx_drops); return NETDEV_TX_OK; } @@ -616,11 +606,11 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); u64_stats_update_begin(&dstats->syncp); - dstats->tx_pkts++; + dstats->tx_packets++; dstats->tx_bytes += len; u64_stats_update_end(&dstats->syncp); } else { - this_cpu_inc(dev->dstats->tx_drps); + this_cpu_inc(dev->dstats->tx_drops); } return ret; diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 155d335c80a7..ef61eab81707 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -274,9 +274,9 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, be32_to_cpu(fdb->vni))) goto nla_put_failure; - ci.ndm_used = jiffies_to_clock_t(now - fdb->used); + ci.ndm_used = jiffies_to_clock_t(now - READ_ONCE(fdb->used)); ci.ndm_confirmed = 0; - ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); + ci.ndm_updated = jiffies_to_clock_t(now - READ_ONCE(fdb->updated)); ci.ndm_refcnt = 0; if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci)) @@ -482,8 +482,8 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, struct vxlan_fdb *f; f = __vxlan_find_mac(vxlan, mac, vni); - if (f && f->used != jiffies) - f->used = jiffies; + if (f && READ_ONCE(f->used) != 
jiffies) + WRITE_ONCE(f->used, jiffies); return f; } @@ -1057,12 +1057,12 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, !(f->flags & NTF_VXLAN_ADDED_BY_USER)) { if (f->state != state) { f->state = state; - f->updated = jiffies; + WRITE_ONCE(f->updated, jiffies); notify = 1; } if (f->flags != fdb_flags) { f->flags = fdb_flags; - f->updated = jiffies; + WRITE_ONCE(f->updated, jiffies); notify = 1; } } @@ -1096,7 +1096,7 @@ static int vxlan_fdb_update_existing(struct vxlan_dev *vxlan, } if (ndm_flags & NTF_USE) - f->used = jiffies; + WRITE_ONCE(f->used, jiffies); if (notify) { if (rd == NULL) @@ -1525,7 +1525,7 @@ static bool vxlan_snoop(struct net_device *dev, src_mac, &rdst->remote_ip.sa, &src_ip->sa); rdst->remote_ip = *src_ip; - f->updated = jiffies; + WRITE_ONCE(f->updated, jiffies); vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true, NULL); } else { u32 hash_index = fdb_head_index(vxlan, src_mac, vni); @@ -2936,7 +2936,7 @@ static void vxlan_cleanup(struct timer_list *t) if (f->flags & NTF_EXT_LEARNED) continue; - timeout = f->used + vxlan->cfg.age_interval * HZ; + timeout = READ_ONCE(f->used) + vxlan->cfg.age_interval * HZ; if (time_before_eq(timeout, jiffies)) { netdev_dbg(vxlan->dev, "garbage collect %pM\n", @@ -2982,8 +2982,11 @@ static int vxlan_init(struct net_device *dev) struct vxlan_dev *vxlan = netdev_priv(dev); int err; - if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) - vxlan_vnigroup_init(vxlan); + if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) { + err = vxlan_vnigroup_init(vxlan); + if (err) + return err; + } dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); if (!dev->tstats) { @@ -4228,6 +4231,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct vxlan_dev *vxlan = netdev_priv(dev); + bool rem_ip_changed, change_igmp; struct net_device *lowerdev; struct vxlan_config conf; struct vxlan_rdst *dst; @@ -4251,8 +4255,13 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], if (err) return err; + rem_ip_changed = !vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip); + change_igmp = vxlan->dev->flags & IFF_UP && + (rem_ip_changed || + dst->remote_ifindex != conf.remote_ifindex); + /* handle default dst entry */ - if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) { + if (rem_ip_changed) { u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni); spin_lock_bh(&vxlan->hash_lock[hash_index]); @@ -4296,6 +4305,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], } } + if (change_igmp && vxlan_addr_multicast(&dst->remote_ip)) + err = vxlan_multicast_leave(vxlan); + if (conf.age_interval != vxlan->cfg.age_interval) mod_timer(&vxlan->age_timer, jiffies); @@ -4303,7 +4315,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], if (lowerdev && lowerdev != dst->remote_dev) dst->remote_dev = lowerdev; vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true); - return 0; + + if (!err && change_igmp && + vxlan_addr_multicast(&dst->remote_ip)) + err = vxlan_multicast_join(vxlan); + + return err; } static void vxlan_dellink(struct net_device *dev, struct list_head *head) diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c index 3d113d709d19..c6d4fae958ca 100644 --- a/drivers/net/vxlan/vxlan_vnifilter.c +++ b/drivers/net/vxlan/vxlan_vnifilter.c @@ -411,6 +411,11 @@ static int vxlan_vnifilter_dump(struct sk_buff *skb, struct netlink_callback *cb struct tunnel_msg *tmsg; struct net_device 
*dev; + if (cb->nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct tunnel_msg))) { + NL_SET_ERR_MSG(cb->extack, "Invalid msg length"); + return -EINVAL; + } + tmsg = nlmsg_data(cb->nlh); if (tmsg->flags & ~TUNNEL_MSG_VALID_USER_FLAGS) { @@ -622,7 +627,11 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan, * default dst remote_ip previously added for this vni */ if (!vxlan_addr_any(&vninode->remote_ip) || - !vxlan_addr_any(&dst->remote_ip)) + !vxlan_addr_any(&dst->remote_ip)) { + u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, + vninode->vni); + + spin_lock_bh(&vxlan->hash_lock[hash_index]); __vxlan_fdb_delete(vxlan, all_zeros_mac, (vxlan_addr_any(&vninode->remote_ip) ? dst->remote_ip : vninode->remote_ip), @@ -630,6 +639,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan, vninode->vni, vninode->vni, dst->remote_ifindex, true); + spin_unlock_bh(&vxlan->hash_lock[hash_index]); + } if (vxlan->dev->flags & IFF_UP) { if (vxlan_addr_multicast(&vninode->remote_ip) && diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index ec5c54672dfe..184c722255c6 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -9120,7 +9120,7 @@ static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[ {6, {2633, 2925}, {1215, 1350}, {585, 650} }, {7, {2925, 3250}, {1350, 1500}, {650, 722} }, {8, {3510, 3900}, {1620, 1800}, {780, 867} }, - {9, {3900, 4333}, {1800, 2000}, {780, 867} } + {9, {3900, 4333}, {1800, 2000}, {865, 960} } }; /*MCS parameters with Nss = 2 */ @@ -9135,7 +9135,7 @@ static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[ {6, {5265, 5850}, {2430, 2700}, {1170, 1300} }, {7, {5850, 6500}, {2700, 3000}, {1300, 1444} }, {8, {7020, 7800}, {3240, 3600}, {1560, 1733} }, - {9, {7800, 8667}, {3600, 4000}, {1560, 1733} } + {9, {7800, 8667}, {3600, 4000}, {1730, 1920} } }; static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs, diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c index 79e09c7a82b3..d0705f2f4656 100644 --- a/drivers/net/wireless/ath/ath10k/sdio.c +++ b/drivers/net/wireless/ath/ath10k/sdio.c @@ -2647,9 +2647,9 @@ static void ath10k_sdio_remove(struct sdio_func *func) netif_napi_del(&ar->napi); - ath10k_core_destroy(ar); - destroy_workqueue(ar_sdio->workqueue); + + ath10k_core_destroy(ar); } static const struct sdio_device_id ath10k_sdio_devices[] = { diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index d01616d06a32..2f6b22708b53 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -3795,6 +3795,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi, ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, &rbm); if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST && + rbm != HAL_RX_BUF_RBM_SW1_BM && rbm != HAL_RX_BUF_RBM_SW3_BM) { ab->soc_stats.invalid_rbm++; ath11k_warn(ab, "invalid return buffer manager %d\n", rbm); diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c index 7f39c6fb7408..d1785e71ffc9 100644 --- a/drivers/net/wireless/ath/ath11k/hal_rx.c +++ b/drivers/net/wireless/ath/ath11k/hal_rx.c @@ -371,7 +371,8 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc, ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, wbm_desc->buf_addr_info.info1); - if (ret_buf_mgr != 
HAL_RX_BUF_RBM_SW3_BM) { + if (ret_buf_mgr != HAL_RX_BUF_RBM_SW1_BM && + ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) { ab->soc_stats.invalid_rbm++; return -EINVAL; } diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c index 86b8cb975b1a..f583e0f3932b 100644 --- a/drivers/net/wireless/ath/ath5k/pci.c +++ b/drivers/net/wireless/ath/ath5k/pci.c @@ -46,6 +46,8 @@ static const struct pci_device_id ath5k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */ { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */ { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */ + { PCI_VDEVICE(ATHEROS, 0xff16) }, /* Gigaset SX76[23] AR241[34]A */ + { PCI_VDEVICE(ATHEROS, 0xff1a) }, /* Arcadyan ARV45XX AR2417 */ { PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */ { 0 } }; diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 99667aba289d..00dc97ac53b9 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -294,6 +294,9 @@ int htc_connect_service(struct htc_target *target, return -ETIMEDOUT; } + if (target->conn_rsp_epid < 0 || target->conn_rsp_epid >= ENDPOINT_MAX) + return -EINVAL; + *conn_rsp_epid = target->conn_rsp_epid; return 0; err: diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 4f00400c7ffb..58386906598a 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -691,7 +691,9 @@ static int ath9k_of_init(struct ath_softc *sc) ah->ah_flags |= AH_NO_EEP_SWAP; } - of_get_mac_address(np, common->macaddr); + ret = of_get_mac_address(np, common->macaddr); + if (ret == -EPROBE_DEFER) + return ret; return 0; } diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 6b8d2889d73f..b3a685f2ddd2 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1585,7 +1585,10 @@ static int wcn36xx_probe(struct platform_device *pdev) } n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels; - wcn->chan_survey = devm_kmalloc(wcn->dev, n_channels, GFP_KERNEL); + wcn->chan_survey = devm_kcalloc(wcn->dev, + n_channels, + sizeof(struct wcn36xx_chan_survey), + GFP_KERNEL); if (!wcn->chan_survey) { ret = -ENOMEM; goto out_wq; diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 24e609c1f523..4a15c22cf348 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -2553,7 +2553,7 @@ static void at76_disconnect(struct usb_interface *interface) wiphy_info(priv->hw->wiphy, "disconnecting\n"); at76_delete_device(priv); - usb_put_dev(priv->udev); + usb_put_dev(interface_to_usbdev(interface)); dev_info(&interface->dev, "disconnected\n"); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index e300278ea38c..7cd8ec6f515a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -770,7 +770,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, sdiodev->settings->bus.sdio.txglomsz); - nents += (nents >> 4) + 1; + nents *= 2; WARN_ON(nents > sdiodev->max_segment_count); @@ -1151,6 +1151,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev) struct brcmf_bus *bus_if; struct brcmf_sdio_dev *sdiodev; 
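/*
 * Hedged sketch, not brcmfmac code: it only models the keep-power decision
 * taken in the brcmf_ops_sdio_suspend()/_resume() hunks that follow.  The
 * flag names are illustrative stand-ins for the MMC core's
 * MMC_CAP_POWER_OFF_CARD, MMC_PM_KEEP_POWER and MMC_PM_WAKE_SDIO_IRQ.
 */
#include <stdbool.h>
#include <stdio.h>

#define PM_KEEP_POWER    0x1
#define PM_WAKE_SDIO_IRQ 0x2

/*
 * Keep the card powered across suspend when wake-on-WLAN is armed or when
 * the host controller cannot power the card off anyway; arm an SDIO wake
 * interrupt only when WoWL is enabled and no out-of-band IRQ is available
 * (the OOB-IRQ case uses enable_irq_wake() instead, omitted here).
 */
static unsigned int suspend_pm_flags(bool wowl_enabled, bool cap_power_off,
				     bool oob_irq_supported)
{
	unsigned int flags = 0;

	if (wowl_enabled || !cap_power_off) {
		flags |= PM_KEEP_POWER;
		if (wowl_enabled && !oob_irq_supported)
			flags |= PM_WAKE_SDIO_IRQ;
	}
	return flags;
}

int main(void)
{
	printf("no wowl, host cannot power off card: %#x\n",
	       suspend_pm_flags(false, false, false));
	printf("wowl enabled, in-band SDIO wake:     %#x\n",
	       suspend_pm_flags(true, true, false));
	return 0;
}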
mmc_pm_flag_t sdio_flags; + bool cap_power_off; int ret = 0; func = container_of(dev, struct sdio_func, dev); @@ -1158,19 +1159,23 @@ static int brcmf_ops_sdio_suspend(struct device *dev) if (func->num != 1) return 0; + cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD); bus_if = dev_get_drvdata(dev); sdiodev = bus_if->bus_priv.sdio; - if (sdiodev->wowl_enabled) { + if (sdiodev->wowl_enabled || !cap_power_off) { brcmf_sdiod_freezer_on(sdiodev); brcmf_sdio_wd_timer(sdiodev->bus, 0); sdio_flags = MMC_PM_KEEP_POWER; - if (sdiodev->settings->bus.sdio.oob_irq_supported) - enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); - else - sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; + + if (sdiodev->wowl_enabled) { + if (sdiodev->settings->bus.sdio.oob_irq_supported) + enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); + else + sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; + } if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags)) brcmf_err("Failed to set pm_flags %x\n", sdio_flags); @@ -1192,18 +1197,19 @@ static int brcmf_ops_sdio_resume(struct device *dev) struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio; struct sdio_func *func = container_of(dev, struct sdio_func, dev); int ret = 0; + bool cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD); brcmf_dbg(SDIO, "Enter: F%d\n", func->num); if (func->num != 2) return 0; - if (!sdiodev->wowl_enabled) { + if (!sdiodev->wowl_enabled && cap_power_off) { /* bus was powered off and device removed, probe again */ ret = brcmf_sdiod_probe(sdiodev); if (ret) brcmf_err("Failed to probe device on resume\n"); } else { - if (sdiodev->settings->bus.sdio.oob_irq_supported) + if (sdiodev->wowl_enabled && sdiodev->settings->bus.sdio.oob_irq_supported) disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); brcmf_sdiod_freezer_off(sdiodev); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 175272c2694d..a7882a50232c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -539,6 +539,11 @@ void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success) struct ethhdr *eh; u16 type; + if (!ifp) { + brcmu_pkt_buf_free_skb(txp); + return; + } + eh = (struct ethhdr *)(txp->data); type = ntohs(eh->h_proto); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index fdd0c9abc1a1..f117c90c53f5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -89,22 +89,21 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, /* Set board-type to the first string of the machine compatible prop */ root = of_find_node_by_path("/"); if (root && err) { - char *board_type; + char *board_type = NULL; const char *tmp; - of_property_read_string_index(root, "compatible", 0, &tmp); - /* get rid of '/' in the compatible string to be able to find the FW */ - board_type = devm_kstrdup(dev, tmp, GFP_KERNEL); + if (!of_property_read_string_index(root, "compatible", 0, &tmp)) + board_type = devm_kstrdup(dev, tmp, GFP_KERNEL); + if (!board_type) { of_node_put(root); return; } strreplace(board_type, '/', '-'); settings->board_type = board_type; - - of_node_put(root); } + of_node_put(root); if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac")) return; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 85e18fb9c497..92b0839970c2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -903,14 +903,16 @@ brcmf_usb_dl_writeimage(struct brcmf_usbdev_info *devinfo, u8 *fw, int fwlen) } /* 1) Prepare USB boot loader for runtime image */ - brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state)); + err = brcmf_usb_dl_cmd(devinfo, DL_START, &state, sizeof(state)); + if (err) + goto fail; rdlstate = le32_to_cpu(state.state); rdlbytes = le32_to_cpu(state.bytes); /* 2) Check we are in the Waiting state */ if (rdlstate != DL_WAITING) { - brcmf_err("Failed to DL_START\n"); + brcmf_err("Invalid DL state: %u\n", rdlstate); err = -EINVAL; goto fail; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c index 8580a2754789..42e7bc67e914 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c @@ -23427,6 +23427,9 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no, break; } + if (WARN_ON(k == NPHY_IQCAL_NUMGAINS)) + return; + params->txgm = tbl_iqcal_gainparams_nphy[band_idx][k][1]; params->pga = tbl_iqcal_gainparams_nphy[band_idx][k][2]; params->pad = tbl_iqcal_gainparams_nphy[band_idx][k][3]; diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c index 48d6870bbf4e..9a97ab9b89ae 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c @@ -870,8 +870,8 @@ void libipw_rx_any(struct libipw_device *ieee, switch (ieee->iw_mode) { case IW_MODE_ADHOC: /* our BSS and not from/to DS */ - if (ether_addr_equal(hdr->addr3, ieee->bssid)) - if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == 0) { + if (ether_addr_equal(hdr->addr3, ieee->bssid) && + ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == 0)) { /* promisc: get all */ if (ieee->dev->flags & IFF_PROMISC) is_packet_for_us = 1; @@ -885,8 +885,8 @@ void libipw_rx_any(struct libipw_device *ieee, break; case IW_MODE_INFRA: /* our BSS (== from our AP) and from DS */ - if (ether_addr_equal(hdr->addr2, ieee->bssid)) - if ((fc & (IEEE80211_FCTL_TODS+IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS) { + if (ether_addr_equal(hdr->addr2, ieee->bssid) && + ((fc & (IEEE80211_FCTL_TODS + IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS)) { /* promisc: get all */ if (ieee->dev->flags & IFF_PROMISC) is_packet_for_us = 1; diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index 687c906a9d72..4b1f006c105b 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 
- * Copyright (C) 2019 - 2020, 2022 Intel Corporation + * Copyright (C) 2019 - 2020, 2022 - 2023 Intel Corporation *****************************************************************************/ #include #include @@ -125,7 +125,7 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) return idx; } - return -1; + return IWL_RATE_INVALID; } static void rs_rate_scale_perform(struct iwl_priv *priv, @@ -3146,7 +3146,10 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { index = iwl_hwrate_to_plcp_idx( le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags)); - if (is_legacy(tbl->lq_type)) { + if (index == IWL_RATE_INVALID) { + desc += sprintf(buff + desc, " rate[%d] 0x%X invalid rate\n", + i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags)); + } else if (is_legacy(tbl->lq_type)) { desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n", i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags), iwl_rate_mcs[index].mbps); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index c96dfd7fd3dc..84980f6a0d60 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -123,7 +123,7 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func, size_t expected_size) { union acpi_object *obj; - int ret = 0; + int ret; obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL, guid); if (IS_ERR(obj)) { @@ -138,8 +138,10 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func, } else if (obj->type == ACPI_TYPE_BUFFER) { __le64 le_value = 0; - if (WARN_ON_ONCE(expected_size > sizeof(le_value))) - return -EINVAL; + if (WARN_ON_ONCE(expected_size > sizeof(le_value))) { + ret = -EINVAL; + goto out; + } /* if the buffer size doesn't match the expected size */ if (obj->buffer.length != expected_size) @@ -160,8 +162,9 @@ static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func, } IWL_DEBUG_DEV_RADIO(dev, - "ACPI: DSM method evaluated: func=%d, ret=%d\n", - func, ret); + "ACPI: DSM method evaluated: func=%d, value=%lld\n", + func, *value); + ret = 0; out: ACPI_FREE(obj); return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 7fadaec777ce..4c5dbd8248e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -559,41 +559,71 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt, } /* - * alloc_sgtable - allocates scallerlist table in the given size, - * fills it with pages and returns it + * alloc_sgtable - allocates (chained) scatterlist in the given size, + * fills it with pages and returns it * @size: the size (in bytes) of the table -*/ -static struct scatterlist *alloc_sgtable(int size) + */ +static struct scatterlist *alloc_sgtable(ssize_t size) { - int alloc_size, nents, i; - struct page *new_page; - struct scatterlist *iter; - struct scatterlist *table; + struct scatterlist *result = NULL, *prev; + int nents, i, n_prev; nents = DIV_ROUND_UP(size, PAGE_SIZE); - table = kcalloc(nents, sizeof(*table), GFP_KERNEL); - if (!table) - return NULL; - sg_init_table(table, nents); - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = alloc_page(GFP_KERNEL); - if (!new_page) { - /* release all previous allocated pages in the table */ - iter = table; - for_each_sg(table, iter, sg_nents(table), i) { - new_page = sg_page(iter); - if (new_page) - __free_page(new_page); - } - 
kfree(table); + +#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result)) + /* + * We need an additional entry for table chaining, + * this ensures the loop can finish i.e. we can + * fit at least two entries per page (obviously, + * many more really fit.) + */ + BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2); + + while (nents > 0) { + struct scatterlist *new, *iter; + int n_fill, n_alloc; + + if (nents <= N_ENTRIES_PER_PAGE) { + /* last needed table */ + n_fill = nents; + n_alloc = nents; + nents = 0; + } else { + /* fill a page with entries */ + n_alloc = N_ENTRIES_PER_PAGE; + /* reserve one for chaining */ + n_fill = n_alloc - 1; + nents -= n_fill; + } + + new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL); + if (!new) { + if (result) + _devcd_free_sgtable(result); return NULL; } - alloc_size = min_t(int, size, PAGE_SIZE); - size -= PAGE_SIZE; - sg_set_page(iter, new_page, alloc_size, 0); + sg_init_table(new, n_alloc); + + if (!result) + result = new; + else + sg_chain(prev, n_prev, new); + prev = new; + n_prev = n_alloc; + + for_each_sg(new, iter, n_fill, i) { + struct page *new_page = alloc_page(GFP_KERNEL); + + if (!new_page) { + _devcd_free_sgtable(result); + return NULL; + } + + sg_set_page(iter, new_page, PAGE_SIZE, 0); + } } - return table; + + return result; } static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 135bd48bfe9f..cf02a2afbee5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -39,10 +39,12 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, } IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); +/* Assumes the appropriate lock is held by the caller */ void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt) { iwl_fw_suspend_timestamp(fwrt); - iwl_dbg_tlv_time_point(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, NULL); + iwl_dbg_tlv_time_point_sync(fwrt, IWL_FW_INI_TIME_POINT_HOST_D3_START, + NULL); } IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 024c37062a60..789393aa68cd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1141,7 +1141,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, if (tlv_len != sizeof(*fseq_ver)) goto invalid_tlv_len; - IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n", + IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n", fseq_ver->version); } break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 425588605a26..f8af851474e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1286,7 +1286,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, mvm->net_detect = true; } else { - struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; + struct iwl_wowlan_config_cmd wowlan_config_cmd = { + .offloading_tid = 0, + }; wowlan_config_cmd.sta_id = mvmvif->ap_sta_id; @@ -1298,6 +1300,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, goto out_noreset; } + ret = iwl_mvm_sta_ensure_queue( + mvm, ap_sta->txq[wowlan_config_cmd.offloading_tid]); + if (ret) + goto out_noreset; + ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd, vif, mvmvif, ap_sta); if (ret) @@ -1375,7 +1382,9 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) iwl_mvm_pause_tcm(mvm, 
true); + mutex_lock(&mvm->mutex); iwl_fw_runtime_suspend(&mvm->fwrt); + mutex_unlock(&mvm->mutex); return __iwl_mvm_suspend(hw, wowlan, false); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 2be6801d48ac..c5c24c6ffde3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1072,10 +1072,13 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta, rate->bw = RATE_MCS_CHAN_WIDTH_20; - WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX || - rate->index > IWL_RATE_MCS_9_INDEX); + if (WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX)) + rate->index = rs_ht_to_legacy[IWL_RATE_MCS_0_INDEX]; + else if (WARN_ON_ONCE(rate->index > IWL_RATE_MCS_9_INDEX)) + rate->index = rs_ht_to_legacy[IWL_RATE_MCS_9_INDEX]; + else + rate->index = rs_ht_to_legacy[rate->index]; - rate->index = rs_ht_to_legacy[rate->index]; rate->ldpc = false; } else { /* Downgrade to SISO with same MCS if in MIMO */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 105f283b777d..fce8950007bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -1272,7 +1272,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, */ u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); u32 rate_n_flags = phy_data->rate_n_flags; - u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1; + u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; u8 offs = 0; rx_status->bw = RATE_INFO_BW_HE_RU; @@ -1327,13 +1327,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, if (he_mu) he_mu->flags2 |= - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW); - else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1) + else if (he_type == RATE_MCS_HE_TYPE_TRIG) he->data6 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) | - le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1, + le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK, rate_n_flags), IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 6b52afcf0272..46bf158eb4b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1419,6 +1419,34 @@ out_err: return ret; } +int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, + struct ieee80211_txq *txq) +{ + struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); + int ret = -EINVAL; + + lockdep_assert_held(&mvm->mutex); + + if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) || + !txq->sta) { + return 0; + } + + if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) { + set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); + ret = 0; + } + + local_bh_disable(); + spin_lock(&mvm->add_stream_lock); + if (!list_empty(&mvmtxq->list)) + list_del_init(&mvmtxq->list); + spin_unlock(&mvm->add_stream_lock); + local_bh_enable(); + + return ret; +} + void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) { struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index f1a4fc3e4038..5f7e9311e7e5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ 
b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -544,6 +544,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif, bool disable); void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm, struct ieee80211_txq *txq); void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk); int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 76219486b9c2..43a732b0c168 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1105,6 +1105,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, bool is_ampdu = false; int hdrlen; + if (WARN_ON_ONCE(!sta)) + return -1; + mvmsta = iwl_mvm_sta_from_mac80211(sta); fc = hdr->frame_control; hdrlen = ieee80211_hdrlen(fc); @@ -1112,9 +1115,6 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc)) return -1; - if (WARN_ON_ONCE(!mvmsta)) - return -1; - if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; @@ -1242,16 +1242,18 @@ drop: int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { - struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_sta *mvmsta; struct ieee80211_tx_info info; struct sk_buff_head mpdus_skbs; unsigned int payload_len; int ret; struct sk_buff *orig_skb = skb; - if (WARN_ON_ONCE(!mvmsta)) + if (WARN_ON_ONCE(!sta)) return -1; + mvmsta = iwl_mvm_sta_from_mac80211(sta); + if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA)) return -1; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 7f30e6add993..39ac9d81d10d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -552,6 +552,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x7AF0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), IWL_DEV_INFO(0x7AF0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), + IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name), + IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name), IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name), IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name), diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c index 19152fd449ba..6fc694e8b7f8 100644 --- a/drivers/net/wireless/intersil/p54/p54spi.c +++ b/drivers/net/wireless/intersil/p54/p54spi.c @@ -623,7 +623,7 @@ static int p54spi_probe(struct spi_device *spi) gpio_direction_input(p54spi_gpio_irq); ret = request_irq(gpio_to_irq(p54spi_gpio_irq), - p54spi_interrupt, 0, "p54spi", + p54spi_interrupt, IRQF_NO_AUTOEN, "p54spi", priv->spi); if (ret < 0) { 
dev_err(&priv->spi->dev, "request_irq() failed"); @@ -632,8 +632,6 @@ static int p54spi_probe(struct spi_device *spi) irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING); - disable_irq(gpio_to_irq(p54spi_gpio_irq)); - INIT_WORK(&priv->work, p54spi_work); init_completion(&priv->fw_comp); INIT_LIST_HEAD(&priv->tx_pending); diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 26979013ca52..9c9fe5757b29 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -842,7 +842,7 @@ struct mwifiex_ietypes_chanstats { struct mwifiex_ie_types_wildcard_ssid_params { struct mwifiex_ie_types_header header; u8 max_ssid_length; - u8 ssid[1]; + u8 ssid[]; } __packed; #define TSF_DATA_SIZE 8 diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index da2e6557e684..59225fcacd59 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -1638,7 +1638,8 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter) } ret = devm_request_irq(dev, adapter->irq_wakeup, - mwifiex_irq_wakeup_handler, IRQF_TRIGGER_LOW, + mwifiex_irq_wakeup_handler, + IRQF_TRIGGER_LOW | IRQF_NO_AUTOEN, "wifi_wake", adapter); if (ret) { dev_err(dev, "Failed to request irq_wakeup %d (%d)\n", @@ -1646,7 +1647,6 @@ static void mwifiex_probe_of(struct mwifiex_adapter *adapter) goto err_exit; } - disable_irq(adapter->irq_wakeup); if (device_init_wakeup(dev, true)) { dev_err(dev, "fail to init wakeup for mwifiex\n"); goto err_exit; diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index c406cb1a102f..e5c72a7d6e92 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -789,6 +789,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev) int i; mt76_worker_disable(&dev->tx_worker); + napi_disable(&dev->tx_napi); netif_napi_del(&dev->tx_napi); for (i = 0; i < ARRAY_SIZE(dev->phys); i++) { diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c index 0a88048b8976..d35f31378ac1 100644 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c @@ -90,6 +90,10 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len) #ifdef CONFIG_NL80211_TESTMODE dev->test_mtd.name = devm_kstrdup(dev->dev, part, GFP_KERNEL); + if (!dev->test_mtd.name) { + ret = -ENOMEM; + goto out_put_node; + } dev->test_mtd.offset = offset; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 55068f3252ef..d80430999219 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -21,6 +21,7 @@ static const struct usb_device_id mt76x2u_device_table[] = { { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */ { USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */ + { USB_DEVICE(0x2357, 0x0137) }, /* TP-Link TL-WDN6200 */ { }, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c index bc68ede64ddb..74f5321611c4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c @@ -423,7 +423,7 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr) continue; 
ofs = addr - dev->reg.map[i].phys; - if (ofs > dev->reg.map[i].size) + if (ofs >= dev->reg.map[i].size) continue; return dev->reg.map[i].maps + ofs; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 172ba7199485..5070cc23917b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -469,7 +469,13 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } else { if (idx == *wcid_keyidx) *wcid_keyidx = -1; - goto out; + + /* For security reasons we don't trigger the key deletion when + * reassociating, but we should trigger the deletion process + * to avoid using an incorrect cipher after disconnection. + */ + if (vif->type != NL80211_IFTYPE_STATION || vif->cfg.assoc) + goto out; } mt76_wcid_key_setup(&dev->mt76, wcid, key); diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 0597df2729a6..1e2133670291 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -33,9 +33,9 @@ int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type, ret = usb_control_msg(udev, pipe, req, req_type, val, offset, buf, len, MT_VEND_REQ_TOUT_MS); - if (ret == -ENODEV) + if (ret == -ENODEV || ret == -EPROTO) set_bit(MT76_REMOVED, &dev->phy.state); - if (ret >= 0 || ret == -ENODEV) + if (ret >= 0 || ret == -ENODEV || ret == -EPROTO) return ret; usleep_range(5000, 10000); } diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c index 87a4ff888ddd..70d6f5244e5e 100644 --- a/drivers/net/wireless/purelifi/plfxlc/mac.c +++ b/drivers/net/wireless/purelifi/plfxlc/mac.c @@ -103,7 +103,6 @@ int plfxlc_mac_init_hw(struct ieee80211_hw *hw) void plfxlc_mac_release(struct plfxlc_mac *mac) { plfxlc_chip_release(&mac->chip); - lockdep_assert_held(&mac->lock); } int plfxlc_op_start(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 9ccf8550a067..cd22c756acc6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -798,9 +798,10 @@ rtl8xxxu_writeN(struct rtl8xxxu_priv *priv, u16 addr, u8 *buf, u16 len) return len; write_error: - dev_info(&udev->dev, - "%s: Failed to write block at addr: %04x size: %04x\n", - __func__, addr, blocksize); + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_REG_WRITE) + dev_info(&udev->dev, + "%s: Failed to write block at addr: %04x size: %04x\n", + __func__, addr, blocksize); return -EAGAIN; } @@ -3920,8 +3921,14 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) */ rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, fops->trxff_boundary); - ret = rtl8xxxu_download_firmware(priv); - dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret); + for (int retry = 5; retry >= 0 ; retry--) { + ret = rtl8xxxu_download_firmware(priv); + dev_dbg(dev, "%s: download_firmware %i\n", __func__, ret); + if (ret != -EAGAIN) + break; + if (retry) + dev_dbg(dev, "%s: retry firmware download\n", __func__); + } if (ret) goto exit; ret = rtl8xxxu_start_firmware(priv); diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 9e7e98b55eff..25570ec0918e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -452,8 +452,7 @@ static int _rtl_init_deferred_work(struct
ieee80211_hw *hw) /* <1> timer */ timer_setup(&rtlpriv->works.watchdog_timer, rtl_watch_dog_timer_callback, 0); - timer_setup(&rtlpriv->works.dualmac_easyconcurrent_retrytimer, - rtl_easy_concurrent_retrytimer_callback, 0); + /* <2> work queue */ rtlpriv->works.hw = hw; rtlpriv->works.rtl_wq = wq; @@ -576,9 +575,15 @@ static void rtl_free_entries_from_ack_queue(struct ieee80211_hw *hw, void rtl_deinit_core(struct ieee80211_hw *hw) { + struct rtl_priv *rtlpriv = rtl_priv(hw); + rtl_c2hcmd_launcher(hw, 0); rtl_free_entries_from_scan_list(hw); rtl_free_entries_from_ack_queue(hw, false); + if (rtlpriv->works.rtl_wq) { + destroy_workqueue(rtlpriv->works.rtl_wq); + rtlpriv->works.rtl_wq = NULL; + } } EXPORT_SYMBOL_GPL(rtl_deinit_core); @@ -2366,19 +2371,6 @@ static void rtl_c2hcmd_wq_callback(struct work_struct *work) rtl_c2hcmd_launcher(hw, 1); } -void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t) -{ - struct rtl_priv *rtlpriv = - from_timer(rtlpriv, t, works.dualmac_easyconcurrent_retrytimer); - struct ieee80211_hw *hw = rtlpriv->hw; - struct rtl_priv *buddy_priv = rtlpriv->buddy_priv; - - if (buddy_priv == NULL) - return; - - rtlpriv->cfg->ops->dualmac_easy_concurrent(hw); -} - /********************************************************* * * frame process functions @@ -2724,9 +2716,6 @@ MODULE_AUTHOR("Larry Finger "); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core"); -struct rtl_global_var rtl_global_var = {}; -EXPORT_SYMBOL_GPL(rtl_global_var); - static int __init rtl_core_module_init(void) { BUILD_BUG_ON(TX_PWR_BY_RATE_NUM_RATE < TX_PWR_BY_RATE_NUM_SECTION); @@ -2740,10 +2729,6 @@ static int __init rtl_core_module_init(void) /* add debugfs */ rtl_debugfs_add_topdir(); - /* init some global vars */ - INIT_LIST_HEAD(&rtl_global_var.glb_priv_list); - spin_lock_init(&rtl_global_var.glb_list_lock); - return 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h index 0e4f8a8ae3a5..f3a6a43a42ec 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.h +++ b/drivers/net/wireless/realtek/rtlwifi/base.h @@ -124,8 +124,6 @@ int rtl_send_smps_action(struct ieee80211_hw *hw, u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie); void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len); u8 rtl_tid_to_ac(u8 tid); -void rtl_easy_concurrent_retrytimer_callback(struct timer_list *t); -extern struct rtl_global_var rtl_global_var; void rtl_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c index 2e945554ed6d..6c8efd2b2642 100644 --- a/drivers/net/wireless/realtek/rtlwifi/efuse.c +++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c @@ -162,10 +162,19 @@ void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value) void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf) { struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 max_attempts = 10000; u32 value32; u8 readbyte; u16 retry; + /* + * In case of USB devices, transfer speeds are limited, hence + * efuse I/O reads could be (way) slower. So, decrease (a lot) + * the read attempts in case of failures. 
+ */ + if (rtlpriv->rtlhal.interface == INTF_USB) + max_attempts = 10; + rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (_offset & 0xff)); readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2); @@ -178,7 +187,7 @@ void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf) retry = 0; value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]); - while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) { + while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < max_attempts)) { value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]); retry++; diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 6116c1bec155..2a1bc168f771 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -295,46 +295,6 @@ static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw) return status; } -static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw, - struct rtl_priv **buddy_priv) -{ - struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw); - struct rtl_priv *tpriv = NULL, *iter; - struct rtl_pci_priv *tpcipriv = NULL; - - if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) { - list_for_each_entry(iter, &rtlpriv->glb_var->glb_priv_list, - list) { - tpcipriv = (struct rtl_pci_priv *)iter->priv; - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "pcipriv->ndis_adapter.funcnumber %x\n", - pcipriv->ndis_adapter.funcnumber); - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "tpcipriv->ndis_adapter.funcnumber %x\n", - tpcipriv->ndis_adapter.funcnumber); - - if (pcipriv->ndis_adapter.busnumber == - tpcipriv->ndis_adapter.busnumber && - pcipriv->ndis_adapter.devnumber == - tpcipriv->ndis_adapter.devnumber && - pcipriv->ndis_adapter.funcnumber != - tpcipriv->ndis_adapter.funcnumber) { - tpriv = iter; - break; - } - } - } - - rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, - "find_buddy_priv %d\n", tpriv != NULL); - - if (tpriv) - *buddy_priv = tpriv; - - return tpriv != NULL; -} - static void rtl_pci_parse_configuration(struct pci_dev *pdev, struct ieee80211_hw *hw) { @@ -443,11 +403,6 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw) if (!rtlpriv->rtlhal.earlymode_enable) return; - if (rtlpriv->dm.supp_phymode_switch && - (rtlpriv->easy_concurrent_ctl.switch_in_process || - (rtlpriv->buddy_priv && - rtlpriv->buddy_priv->easy_concurrent_ctl.switch_in_process))) - return; /* we just use em for BE/BK/VI/VO */ for (tid = 7; tid >= 0; tid--) { u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)]; @@ -1702,8 +1657,6 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw) synchronize_irq(rtlpci->pdev->irq); tasklet_kill(&rtlpriv->works.irq_tasklet); cancel_work_sync(&rtlpriv->works.lps_change_work); - - destroy_workqueue(rtlpriv->works.rtl_wq); } static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev) @@ -2018,7 +1971,6 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev, pcipriv->ndis_adapter.amd_l1_patch); rtl_pci_parse_configuration(pdev, hw); - list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list); return true; } @@ -2165,7 +2117,6 @@ int rtl_pci_probe(struct pci_dev *pdev, rtlpriv->rtlhal.interface = INTF_PCI; rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data); rtlpriv->intf_ops = &rtl_pci_ops; - rtlpriv->glb_var = &rtl_global_var; rtl_efuse_ops_init(hw); /* MEM map */ @@ -2216,7 +2167,7 @@ int rtl_pci_probe(struct pci_dev *pdev, if (rtlpriv->cfg->ops->init_sw_vars(hw)) { pr_err("Can't init_sw_vars\n"); err = 
-ENODEV; - goto fail3; + goto fail2; } rtlpriv->cfg->ops->init_sw_leds(hw); @@ -2234,14 +2185,14 @@ int rtl_pci_probe(struct pci_dev *pdev, err = rtl_pci_init(hw, pdev); if (err) { pr_err("Failed to init PCI\n"); - goto fail3; + goto fail4; } err = ieee80211_register_hw(hw); if (err) { pr_err("Can't register mac80211 hw.\n"); err = -ENODEV; - goto fail3; + goto fail5; } rtlpriv->mac80211.mac80211_registered = 1; @@ -2264,16 +2215,19 @@ int rtl_pci_probe(struct pci_dev *pdev, set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); return 0; -fail3: - pci_set_drvdata(pdev, NULL); +fail5: + rtl_pci_deinit(hw); +fail4: rtl_deinit_core(hw); +fail3: + wait_for_completion(&rtlpriv->firmware_loading_complete); + rtlpriv->cfg->ops->deinit_sw_vars(hw); fail2: if (rtlpriv->io.pci_mem_start != 0) pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); pci_release_regions(pdev); - complete(&rtlpriv->firmware_loading_complete); fail1: if (hw) @@ -2324,7 +2278,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev) if (rtlpci->using_msi) pci_disable_msi(rtlpci->pdev); - list_del(&rtlpriv->list); if (rtlpriv->io.pci_mem_start != 0) { pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start); pci_release_regions(pdev); @@ -2384,7 +2337,6 @@ const struct rtl_intf_ops rtl_pci_ops = { .read_efuse_byte = read_efuse_byte, .adapter_start = rtl_pci_start, .adapter_stop = rtl_pci_stop, - .check_buddy_priv = rtl_pci_check_buddy_priv, .adapter_tx = rtl_pci_tx, .flush = rtl_pci_flush, .reset_trx_ring = rtl_pci_reset_trx_ring, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 6d352a3161b8..60d97e73ca28 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -67,22 +67,23 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context) rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD, "Firmware callback routine entered!\n"); - complete(&rtlpriv->firmware_loading_complete); if (!firmware) { pr_err("Firmware %s not available\n", fw_name); rtlpriv->max_fw_size = 0; - return; + goto exit; } if (firmware->size > rtlpriv->max_fw_size) { pr_err("Firmware is too big!\n"); rtlpriv->max_fw_size = 0; release_firmware(firmware); - return; + goto exit; } pfirmware = (struct rt_firmware *)rtlpriv->rtlhal.pfirmware; memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size); pfirmware->sz_fw_tmpbufferlen = firmware->size; release_firmware(firmware); +exit: + complete(&rtlpriv->firmware_loading_complete); } static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h index c269942b3f4a..af8d17b9e012 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.h @@ -197,9 +197,9 @@ enum rtl8821a_h2c_cmd { /* _MEDIA_STATUS_RPT_PARM_CMD1 */ #define SET_H2CCMD_MSRRPT_PARM_OPMODE(__cmd, __value) \ - u8p_replace_bits(__cmd + 1, __value, BIT(0)) + u8p_replace_bits(__cmd, __value, BIT(0)) #define SET_H2CCMD_MSRRPT_PARM_MACID_IND(__cmd, __value) \ - u8p_replace_bits(__cmd + 1, __value, BIT(1)) + u8p_replace_bits(__cmd, __value, BIT(1)) /* AP_OFFLOAD */ #define SET_H2CCMD_AP_OFFLOAD_ON(__cmd, __value) \ diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index a8eebafb9a7e..68dc0e6af6b1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ 
b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -679,11 +679,6 @@ static void _rtl_usb_cleanup_rx(struct ieee80211_hw *hw) tasklet_kill(&rtlusb->rx_work_tasklet); cancel_work_sync(&rtlpriv->works.lps_change_work); - if (rtlpriv->works.rtl_wq) { - destroy_workqueue(rtlpriv->works.rtl_wq); - rtlpriv->works.rtl_wq = NULL; - } - skb_queue_purge(&rtlusb->rx_queue); while ((urb = usb_get_from_anchor(&rtlusb->rx_cleanup_urbs))) { @@ -1073,19 +1068,22 @@ int rtl_usb_probe(struct usb_interface *intf, err = ieee80211_register_hw(hw); if (err) { pr_err("Can't register mac80211 hw.\n"); - goto error_out; + goto error_init_vars; } rtlpriv->mac80211.mac80211_registered = 1; set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status); return 0; +error_init_vars: + wait_for_completion(&rtlpriv->firmware_loading_complete); + rtlpriv->cfg->ops->deinit_sw_vars(hw); error_out: + rtl_usb_deinit(hw); rtl_deinit_core(hw); error_out2: _rtl_usb_io_handler_release(hw); usb_put_dev(udev); - complete(&rtlpriv->firmware_loading_complete); kfree(rtlpriv->usb_data); ieee80211_free_hw(hw); return -ENODEV; diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 0bac788ccd6e..a8b5db365a30 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2300,7 +2300,6 @@ struct rtl_hal_ops { u32 regaddr, u32 bitmask, u32 data); void (*linked_set_reg)(struct ieee80211_hw *hw); void (*chk_switch_dmdp)(struct ieee80211_hw *hw); - void (*dualmac_easy_concurrent)(struct ieee80211_hw *hw); void (*dualmac_switch_to_dmdp)(struct ieee80211_hw *hw); bool (*phy_rf6052_config)(struct ieee80211_hw *hw); void (*phy_rf6052_set_cck_txpower)(struct ieee80211_hw *hw, @@ -2336,8 +2335,6 @@ struct rtl_intf_ops { void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf); int (*adapter_start)(struct ieee80211_hw *hw); void (*adapter_stop)(struct ieee80211_hw *hw); - bool (*check_buddy_priv)(struct ieee80211_hw *hw, - struct rtl_priv **buddy_priv); int (*adapter_tx)(struct ieee80211_hw *hw, struct ieee80211_sta *sta, @@ -2465,7 +2462,6 @@ struct rtl_works { /*timer */ struct timer_list watchdog_timer; - struct timer_list dualmac_easyconcurrent_retrytimer; struct timer_list fw_clockoff_timer; struct timer_list fast_antenna_training_timer; /*task */ @@ -2498,14 +2494,6 @@ struct rtl_debug { #define MIMO_PS_DYNAMIC 1 #define MIMO_PS_NOLIMIT 3 -struct rtl_dualmac_easy_concurrent_ctl { - enum band_type currentbandtype_backfordmdp; - bool close_bbandrf_for_dmsp; - bool change_to_dmdp; - bool change_to_dmsp; - bool switch_in_process; -}; - struct rtl_dmsp_ctl { bool activescan_for_slaveofdmsp; bool scan_for_anothermac_fordmsp; @@ -2590,14 +2578,6 @@ struct dig_t { u32 rssi_max; }; -struct rtl_global_var { - /* from this list we can get - * other adapter's rtl_priv - */ - struct list_head glb_priv_list; - spinlock_t glb_list_lock; -}; - #define IN_4WAY_TIMEOUT_TIME (30 * MSEC_PER_SEC) /* 30 seconds */ struct rtl_btc_info { @@ -2743,10 +2723,7 @@ struct rtl_scan_list { struct rtl_priv { struct ieee80211_hw *hw; struct completion firmware_loading_complete; - struct list_head list; struct rtl_priv *buddy_priv; - struct rtl_global_var *glb_var; - struct rtl_dualmac_easy_concurrent_ctl easy_concurrent_ctl; struct rtl_dmsp_ctl dmsp_ctl; struct rtl_locks locks; struct rtl_works works; diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c index 4620a0d5c77b..0a913cf6a615 100644 --- 
a/drivers/net/wireless/realtek/rtw88/main.c +++ b/drivers/net/wireless/realtek/rtw88/main.c @@ -1516,6 +1516,7 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev, { const struct rtw_chip_info *chip = rtwdev->chip; struct rtw_efuse *efuse = &rtwdev->efuse; + int i; ht_cap->ht_supported = true; ht_cap->cap = 0; @@ -1535,25 +1536,20 @@ static void rtw_init_ht_cap(struct rtw_dev *rtwdev, ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_cap->ampdu_density = chip->ampdu_density; ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - if (efuse->hw_cap.nss > 1) { - ht_cap->mcs.rx_mask[0] = 0xFF; - ht_cap->mcs.rx_mask[1] = 0xFF; - ht_cap->mcs.rx_mask[4] = 0x01; - ht_cap->mcs.rx_highest = cpu_to_le16(300); - } else { - ht_cap->mcs.rx_mask[0] = 0xFF; - ht_cap->mcs.rx_mask[1] = 0x00; - ht_cap->mcs.rx_mask[4] = 0x01; - ht_cap->mcs.rx_highest = cpu_to_le16(150); - } + + for (i = 0; i < efuse->hw_cap.nss; i++) + ht_cap->mcs.rx_mask[i] = 0xFF; + ht_cap->mcs.rx_mask[4] = 0x01; + ht_cap->mcs.rx_highest = cpu_to_le16(150 * efuse->hw_cap.nss); } static void rtw_init_vht_cap(struct rtw_dev *rtwdev, struct ieee80211_sta_vht_cap *vht_cap) { struct rtw_efuse *efuse = &rtwdev->efuse; - u16 mcs_map; + u16 mcs_map = 0; __le16 highest; + int i; if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE && efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT) @@ -1576,21 +1572,15 @@ static void rtw_init_vht_cap(struct rtw_dev *rtwdev, if (rtw_chip_has_rx_ldpc(rtwdev)) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; - mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 | - IEEE80211_VHT_MCS_NOT_SUPPORTED << 14; - if (efuse->hw_cap.nss > 1) { - highest = cpu_to_le16(780); - mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2; - } else { - highest = cpu_to_le16(390); - mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2; + for (i = 0; i < 8; i++) { + if (i < efuse->hw_cap.nss) + mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); + else + mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); } + highest = cpu_to_le16(390 * efuse->hw_cap.nss); + vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); vht_cap->vht_mcs.rx_highest = highest; @@ -2146,7 +2136,7 @@ void rtw_core_deinit(struct rtw_dev *rtwdev) destroy_workqueue(rtwdev->tx_wq); spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags); - skb_queue_purge(&rtwdev->tx_report.queue); + ieee80211_purge_tx_queue(rtwdev->hw, &rtwdev->tx_report.queue); skb_queue_purge(&rtwdev->coex.queue); spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags); diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index 03bd8dc53f72..08628ba3419d 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -107,6 +107,7 @@ #define BIT_SHIFT_ROM_PGE 16 #define BIT_FW_INIT_RDY BIT(15) #define BIT_FW_DW_RDY BIT(14) +#define BIT_CPU_CLK_SEL (BIT(12) | BIT(13)) #define BIT_RPWM_TOGGLE BIT(7) #define BIT_RAM_DL_SEL BIT(7) /* legacy only */ #define BIT_DMEM_CHKSUM_OK BIT(6) @@ -124,7 +125,7 @@ BIT_CHECK_SUM_OK) #define FW_READY_LEGACY (BIT_MCUFWDL_RDY | BIT_FWDL_CHK_RPT | \ BIT_WINTINI_RDY | BIT_RAM_DL_SEL) -#define FW_READY_MASK 0xffff +#define FW_READY_MASK (0xffff & ~BIT_CPU_CLK_SEL) #define REG_MCU_TST_CFG 0x84 #define VAL_FW_TRIGGER 0x1 diff --git 
a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 690e35c98f6e..0b071a116c58 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -957,11 +957,11 @@ static void rtw8822b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc, } static void -rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) +rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, + u8 rs, u32 *phy_pwr_idx) { struct rtw_hal *hal = &rtwdev->hal; static const u32 offset_txagc[2] = {0x1d00, 0x1d80}; - static u32 phy_pwr_idx; u8 rate, rate_idx, pwr_index, shift; int j; @@ -969,12 +969,12 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) rate = rtw_rate_section[rs][j]; pwr_index = hal->tx_pwr_tbl[path][rate]; shift = rate & 0x3; - phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); + *phy_pwr_idx |= ((u32)pwr_index << (shift * 8)); if (shift == 0x3) { rate_idx = rate & 0xfc; rtw_write32(rtwdev, offset_txagc[path] + rate_idx, - phy_pwr_idx); - phy_pwr_idx = 0; + *phy_pwr_idx); + *phy_pwr_idx = 0; } } } @@ -982,11 +982,13 @@ rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs) static void rtw8822b_set_tx_power_index(struct rtw_dev *rtwdev) { struct rtw_hal *hal = &rtwdev->hal; + u32 phy_pwr_idx = 0; int rs, path; for (path = 0; path < hal->rf_path_num; path++) { for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) - rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs); + rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs, + &phy_pwr_idx); } } diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c index ab39245e9c2f..05e6911a4044 100644 --- a/drivers/net/wireless/realtek/rtw88/tx.c +++ b/drivers/net/wireless/realtek/rtw88/tx.c @@ -169,7 +169,7 @@ void rtw_tx_report_purge_timer(struct timer_list *t) rtw_warn(rtwdev, "failed to get tx report from firmware\n"); spin_lock_irqsave(&tx_report->q_lock, flags); - skb_queue_purge(&tx_report->queue); + ieee80211_purge_tx_queue(rtwdev->hw, &tx_report->queue); spin_unlock_irqrestore(&tx_report->q_lock, flags); } diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c index cdfd66a85075..43cd06aa39b1 100644 --- a/drivers/net/wireless/realtek/rtw88/util.c +++ b/drivers/net/wireless/realtek/rtw88/util.c @@ -101,7 +101,8 @@ void rtw_desc_to_mcsrate(u16 rate, u8 *mcs, u8 *nss) *nss = 4; *mcs = rate - DESC_RATEVHT4SS_MCS0; } else if (rate >= DESC_RATEMCS0 && - rate <= DESC_RATEMCS15) { + rate <= DESC_RATEMCS31) { + *nss = 0; *mcs = rate - DESC_RATEMCS0; } } diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 1d57a8c5e97d..0f022a5192ac 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -373,7 +373,6 @@ static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 l ret = rtw89_h2c_tx(rtwdev, skb, false); if (ret) { rtw89_err(rtwdev, "failed to send h2c\n"); - ret = -1; goto fail; } @@ -434,7 +433,6 @@ static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev, ret = rtw89_h2c_tx(rtwdev, skb, true); if (ret) { rtw89_err(rtwdev, "failed to send h2c\n"); - ret = -1; goto fail; } diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 3a108b13aa59..f7880499aeb0 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ 
b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -105,7 +105,9 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw, mutex_lock(&rtwdev->mutex); rtwvif->rtwdev = rtwdev; - list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list); + if (!rtw89_rtwvif_in_list(rtwdev, rtwvif)) + list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list); + INIT_WORK(&rtwvif->update_beacon_work, rtw89_core_update_beacon_work); rtw89_leave_ps_mode(rtwdev); diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index 6e5a740b128f..2d31193fcc87 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -334,6 +334,7 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct rtw89_dev *rtwdev = hw->priv; + wiphy_lock(wiphy); mutex_lock(&rtwdev->mutex); rtw89_leave_ps_mode(rtwdev); @@ -350,4 +351,5 @@ void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request exit: mutex_unlock(&rtwdev->mutex); + wiphy_unlock(wiphy); } diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index afb1b41e1a9a..f5dacdc4d11a 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -153,9 +153,11 @@ static void ser_state_run(struct rtw89_ser *ser, u8 evt) rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n", ser_st_name(ser), ser_ev_name(ser, evt)); + wiphy_lock(rtwdev->hw->wiphy); mutex_lock(&rtwdev->mutex); rtw89_leave_lps(rtwdev); mutex_unlock(&rtwdev->mutex); + wiphy_unlock(rtwdev->hw->wiphy); ser->st_tbl[ser->state].st_func(ser, evt); } @@ -624,9 +626,11 @@ static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt) switch (evt) { case SER_EV_STATE_IN: + wiphy_lock(rtwdev->hw->wiphy); mutex_lock(&rtwdev->mutex); ser_l2_reset_st_pre_hdl(ser); mutex_unlock(&rtwdev->mutex); + wiphy_unlock(rtwdev->hw->wiphy); ieee80211_restart_hw(rtwdev->hw); ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT); diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h index 1ae80b7561da..f9f52b5b63b9 100644 --- a/drivers/net/wireless/realtek/rtw89/util.h +++ b/drivers/net/wireless/realtek/rtw89/util.h @@ -14,6 +14,24 @@ #define rtw89_for_each_rtwvif(rtwdev, rtwvif) \ list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list) +/* Before adding rtwvif to the list, we need to check if it already exists, because + * in some cases, such as SER L2 happening during the WoWLAN flow, calling reconfig + * twice causes the vif to be added to the list twice. + */ +static inline bool rtw89_rtwvif_in_list(struct rtw89_dev *rtwdev, + struct rtw89_vif *new) +{ + struct rtw89_vif *rtwvif; + + lockdep_assert_held(&rtwdev->mutex); + + rtw89_for_each_rtwvif(rtwdev, rtwvif) + if (rtwvif == new) + return true; + + return false; +} + /* The result of negative dividend and positive divisor is undefined, but it * should be one case of round-down or round-up. So, make it round-down if the * result is round-up.
diff --git a/drivers/net/wireless/silabs/wfx/main.c b/drivers/net/wireless/silabs/wfx/main.c index 84d82ddded56..2a3dc7d961ed 100644 --- a/drivers/net/wireless/silabs/wfx/main.c +++ b/drivers/net/wireless/silabs/wfx/main.c @@ -479,10 +479,23 @@ static int __init wfx_core_init(void) { int ret = 0; - if (IS_ENABLED(CONFIG_SPI)) + if (IS_ENABLED(CONFIG_SPI)) { ret = spi_register_driver(&wfx_spi_driver); - if (IS_ENABLED(CONFIG_MMC) && !ret) + if (ret) + goto out; + } + if (IS_ENABLED(CONFIG_MMC)) { ret = sdio_register_driver(&wfx_sdio_driver); + if (ret) + goto unregister_spi; + } + + return 0; + +unregister_spi: + if (IS_ENABLED(CONFIG_SPI)) + spi_unregister_driver(&wfx_spi_driver); +out: return ret; } module_init(wfx_core_init); diff --git a/drivers/net/wireless/ti/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c index e9dc3c72bb11..06dc74cc6cb5 100644 --- a/drivers/net/wireless/ti/wl1251/tx.c +++ b/drivers/net/wireless/ti/wl1251/tx.c @@ -342,8 +342,10 @@ void wl1251_tx_work(struct work_struct *work) while ((skb = skb_dequeue(&wl->tx_queue))) { if (!woken_up) { ret = wl1251_ps_elp_wakeup(wl); - if (ret < 0) + if (ret < 0) { + skb_queue_head(&wl->tx_queue, skb); goto out; + } woken_up = true; } diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 28c0f06e311f..b88ceb1f9800 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -2533,24 +2533,24 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) || test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) { ret = -EBUSY; - goto out; + goto out_unlock; } ret = wl12xx_init_vif_data(wl, vif); if (ret < 0) - goto out; + goto out_unlock; wlvif->wl = wl; role_type = wl12xx_get_role_type(wl, wlvif); if (role_type == WL12XX_INVALID_ROLE_TYPE) { ret = -EINVAL; - goto out; + goto out_unlock; } ret = wlcore_allocate_hw_queue_base(wl, wlvif); if (ret < 0) - goto out; + goto out_unlock; /* * TODO: after the nvs issue will be solved, move this block @@ -2565,7 +2565,7 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw, ret = wl12xx_init_fw(wl); if (ret < 0) - goto out; + goto out_unlock; } /* diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c index 63eb08c43c05..6764c13530b9 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c @@ -104,7 +104,7 @@ struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev) break; msleep(20); - } while (retries-- > 0); + } while (--retries > 0); if (!retries) { dev_err(ipc_mmio->dev, "invalid exec stage %X", stage); diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c index 04517bd3325a..a066977af0be 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c +++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include "iosm_ipc_imem.h" @@ -18,6 +19,7 @@ MODULE_LICENSE("GPL v2"); /* WWAN GUID */ static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92, 0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d); +static bool pci_registered; static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie) { @@ -448,7 +450,6 @@ static struct pci_driver iosm_ipc_driver = { }, .id_table = iosm_ipc_ids, }; -module_pci_driver(iosm_ipc_driver); int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data, size_t size, dma_addr_t *mapping, int direction) @@ -530,3 +531,56 @@ void 
ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb) IPC_CB(skb)->mapping = 0; dev_kfree_skb(skb); } + +static int pm_notify(struct notifier_block *nb, unsigned long mode, void *_unused) +{ + if (mode == PM_HIBERNATION_PREPARE || mode == PM_RESTORE_PREPARE) { + if (pci_registered) { + pci_unregister_driver(&iosm_ipc_driver); + pci_registered = false; + } + } else if (mode == PM_POST_HIBERNATION || mode == PM_POST_RESTORE) { + if (!pci_registered) { + int ret; + + ret = pci_register_driver(&iosm_ipc_driver); + if (ret) { + pr_err(KBUILD_MODNAME ": unable to re-register PCI driver: %d\n", + ret); + } else { + pci_registered = true; + } + } + } + + return 0; +} + +static struct notifier_block pm_notifier = { + .notifier_call = pm_notify, +}; + +static int __init iosm_ipc_driver_init(void) +{ + int ret; + + ret = pci_register_driver(&iosm_ipc_driver); + if (ret) + return ret; + + pci_registered = true; + + register_pm_notifier(&pm_notifier); + + return 0; +} +module_init(iosm_ipc_driver_init); + +static void __exit iosm_ipc_driver_exit(void) +{ + unregister_pm_notifier(&pm_notifier); + + if (pci_registered) + pci_unregister_driver(&iosm_ipc_driver); +} +module_exit(iosm_ipc_driver_exit); diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c index ef70bb7c88ad..43c20deab318 100644 --- a/drivers/net/wwan/mhi_wwan_mbim.c +++ b/drivers/net/wwan/mhi_wwan_mbim.c @@ -209,7 +209,7 @@ static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *s if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) && (mbim->rx_seq || le16_to_cpu(nth16->wSequence)) && !(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) { - net_err_ratelimited("sequence number glitch prev=%d curr=%d\n", + net_dbg_ratelimited("sequence number glitch prev=%d curr=%d\n", mbim->rx_seq, le16_to_cpu(nth16->wSequence)); } mbim->rx_seq = le16_to_cpu(nth16->wSequence); diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c index 0bcca08ff2bd..44a2081c5249 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c @@ -97,14 +97,21 @@ void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state) fsm_state_notify(ctl->md, state); } +static void fsm_release_command(struct kref *ref) +{ + struct t7xx_fsm_command *cmd = container_of(ref, typeof(*cmd), refcnt); + + kfree(cmd); +} + static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result) { if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { - *cmd->ret = result; - complete_all(cmd->done); + cmd->result = result; + complete_all(&cmd->done); } - kfree(cmd); + kref_put(&cmd->refcnt, fsm_release_command); } static void fsm_del_kf_event(struct t7xx_fsm_event *event) @@ -387,7 +394,6 @@ static int fsm_main_thread(void *data) int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag) { - DECLARE_COMPLETION_ONSTACK(done); struct t7xx_fsm_command *cmd; unsigned long flags; int ret; @@ -399,11 +405,13 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id INIT_LIST_HEAD(&cmd->entry); cmd->cmd_id = cmd_id; cmd->flag = flag; + kref_init(&cmd->refcnt); if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { - cmd->done = &done; - cmd->ret = &ret; + init_completion(&cmd->done); + kref_get(&cmd->refcnt); } + kref_get(&cmd->refcnt); spin_lock_irqsave(&ctl->command_lock, flags); list_add_tail(&cmd->entry, &ctl->command_queue); 
spin_unlock_irqrestore(&ctl->command_lock, flags); @@ -413,11 +421,11 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) { unsigned long wait_ret; - wait_ret = wait_for_completion_timeout(&done, + wait_ret = wait_for_completion_timeout(&cmd->done, msecs_to_jiffies(FSM_CMD_TIMEOUT_MS)); - if (!wait_ret) - return -ETIMEDOUT; + ret = wait_ret ? cmd->result : -ETIMEDOUT; + kref_put(&cmd->refcnt, fsm_release_command); return ret; } diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h index b1af0259d4c5..d9f2522e6b2c 100644 --- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h +++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h @@ -107,8 +107,9 @@ struct t7xx_fsm_command { struct list_head entry; enum t7xx_fsm_cmd_state cmd_id; unsigned int flag; - struct completion *done; - int *ret; + struct completion done; + int result; + struct kref refcnt; }; struct t7xx_fsm_notifier { diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 95b5ab4b964e..69ef50fb2e1b 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -867,7 +867,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev static int xennet_close(struct net_device *dev) { struct netfront_info *np = netdev_priv(dev); - unsigned int num_queues = dev->real_num_tx_queues; + unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0; unsigned int i; struct netfront_queue *queue; netif_tx_stop_all_queues(np->netdev); @@ -882,6 +882,9 @@ static void xennet_destroy_queues(struct netfront_info *info) { unsigned int i; + if (!info->queues) + return; + for (i = 0; i < info->netdev->real_num_tx_queues; i++) { struct netfront_queue *queue = &info->queues[i]; @@ -982,20 +985,27 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata, act = bpf_prog_run_xdp(prog, xdp); switch (act) { case XDP_TX: - get_page(pdata); xdpf = xdp_convert_buff_to_frame(xdp); - err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0); - if (unlikely(!err)) - xdp_return_frame_rx_napi(xdpf); - else if (unlikely(err < 0)) + if (unlikely(!xdpf)) { trace_xdp_exception(queue->info->netdev, prog, act); + break; + } + get_page(pdata); + err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0); + if (unlikely(err <= 0)) { + if (err < 0) + trace_xdp_exception(queue->info->netdev, prog, act); + xdp_return_frame_rx_napi(xdpf); + } break; case XDP_REDIRECT: get_page(pdata); err = xdp_do_redirect(queue->info->netdev, xdp, prog); *need_xdp_flush = true; - if (unlikely(err)) + if (unlikely(err)) { trace_xdp_exception(queue->info->netdev, prog, act); + xdp_return_buff(xdp); + } break; case XDP_PASS: case XDP_DROP: diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index 730f2103b91d..9c8dd5dea273 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -1323,6 +1323,7 @@ static const struct pci_device_id amd_ntb_pci_tbl[] = { { PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] }, { PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] }, { PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] }, + { PCI_VDEVICE(AMD, 0x155a), (kernel_ulong_t)&dev_data[1] }, { PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] }, { 0, } }; diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c index 51799fccf840..6f7620b15303 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.c +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ 
-1041,7 +1041,7 @@ static inline char *idt_get_mw_name(enum idt_mw_type mw_type) static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port, unsigned char *mw_cnt) { - struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws; + struct idt_mw_cfg *mws; const struct idt_ntb_bar *bars; enum idt_mw_type mw_type; unsigned char widx, bidx, en_cnt; @@ -1049,6 +1049,11 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port, int aprt_size; u32 data; + mws = devm_kcalloc(&ndev->ntb.pdev->dev, IDT_MAX_NR_MWS, + sizeof(*mws), GFP_KERNEL); + if (!mws) + return ERR_PTR(-ENOMEM); + /* Retrieve the array of the BARs registers */ bars = portdata_tbl[port].bars; @@ -1103,16 +1108,7 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port, } } - /* Allocate memory for memory window descriptors */ - ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws), - GFP_KERNEL); - if (!ret_mws) - return ERR_PTR(-ENOMEM); - - /* Copy the info of detected memory windows */ - memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws)); - - return ret_mws; + return mws; } /* diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c index ffcfc3e02c35..a5aa96a31f4a 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen3.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c @@ -215,6 +215,9 @@ static int gen3_init_ntb(struct intel_ntb_dev *ndev) } ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; + /* Make sure we are not using DB's used for link status */ + if (ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) + ndev->db_valid_mask &= ~ndev->db_link_mask; ndev->reg->db_iowrite(ndev->db_valid_mask, ndev->self_mmio + diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c index 7ce65a00db56..1bdec5910001 100644 --- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -288,7 +288,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, if (size != 0 && xlate_pos < 12) return -EINVAL; - if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) { + if (xlate_pos >= 0 && !IS_ALIGNED(addr, BIT_ULL(xlate_pos))) { /* * In certain circumstances we can get a buffer that is * not aligned to its size. 
(Most of the time diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 5b25260e5925..43b387ba9eb1 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -1351,7 +1351,7 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) qp_count = ilog2(qp_bitmap); if (nt->use_msi) { qp_count -= 1; - nt->msi_db_mask = 1 << qp_count; + nt->msi_db_mask = BIT_ULL(qp_count); ntb_db_clear_mask(ndev, nt->msi_db_mask); } diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 5a7a02408166..04690897a067 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -839,10 +839,8 @@ static int perf_copy_chunk(struct perf_thread *pthr, dma_set_unmap(tx, unmap); ret = dma_submit_error(dmaengine_submit(tx)); - if (ret) { - dmaengine_unmap_put(unmap); + if (ret) goto err_free_resource; - } dmaengine_unmap_put(unmap); diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c index 7f4a9d28b670..5cafca6ba1da 100644 --- a/drivers/nvdimm/dax_devs.c +++ b/drivers/nvdimm/dax_devs.c @@ -106,12 +106,12 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns) nvdimm_bus_lock(&ndns->dev); nd_dax = nd_dax_alloc(nd_region); - nd_pfn = &nd_dax->nd_pfn; - dax_dev = nd_pfn_devinit(nd_pfn, ndns); + dax_dev = nd_dax_devinit(nd_dax, ndns); nvdimm_bus_unlock(&ndns->dev); if (!dax_dev) return -ENOMEM; pfn_sb = devm_kmalloc(dev, sizeof(*pfn_sb), GFP_KERNEL); + nd_pfn = &nd_dax->nd_pfn; nd_pfn->pfn_sb = pfn_sb; rc = nd_pfn_validate(nd_pfn, DAX_SIG); dev_dbg(dev, "dax: %s\n", rc == 0 ? dev_name(dax_dev) : ""); diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c index 082253a3a956..04f4a049599a 100644 --- a/drivers/nvdimm/label.c +++ b/drivers/nvdimm/label.c @@ -442,7 +442,8 @@ int nd_label_data_init(struct nvdimm_drvdata *ndd) if (ndd->data) return 0; - if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) { + if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0 || + ndd->nsarea.config_size == 0) { dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n", ndd->nsarea.max_xfer, ndd->nsarea.config_size); return -ENXIO; diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index ec5219680092..cce728f5409d 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -601,6 +601,13 @@ struct nd_dax *to_nd_dax(struct device *dev); int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns); bool is_nd_dax(struct device *dev); struct device *nd_dax_create(struct nd_region *nd_region); +static inline struct device *nd_dax_devinit(struct nd_dax *nd_dax, + struct nd_namespace_common *ndns) +{ + if (!nd_dax) + return NULL; + return nd_pfn_devinit(&nd_dax->nd_pfn, ndns); +} #else static inline int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 92ffeb660561..fbe3fb4fbe95 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -501,8 +501,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl, switch (new_state) { case NVME_CTRL_LIVE: switch (old_state) { - case NVME_CTRL_NEW: - case NVME_CTRL_RESETTING: case NVME_CTRL_CONNECTING: changed = true; fallthrough; @@ -1609,7 +1607,13 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, &result); - if (status < 0) + + /* + * It's either a kernel error or the host observed a connection + * lost. 
In either case it's not possible to communicate with the + controller, and thus we enter the error code path. + */ if (status < 0 || status == NVME_SC_HOST_PATH_ERROR) return status; /* @@ -3099,7 +3103,7 @@ int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi, static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, struct nvme_effects_log **log) { - struct nvme_effects_log *cel = xa_load(&ctrl->cels, csi); + struct nvme_effects_log *old, *cel = xa_load(&ctrl->cels, csi); int ret; if (cel) @@ -3116,7 +3120,11 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, return ret; } - xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); + old = xa_store(&ctrl->cels, csi, cel, GFP_KERNEL); + if (xa_is_err(old)) { + kfree(cel); + return xa_err(old); + } out: *log = cel; return 0; @@ -4696,6 +4704,15 @@ static void nvme_scan_work(struct work_struct *work) if (nvme_scan_ns_list(ctrl) != 0) nvme_scan_ns_sequential(ctrl); mutex_unlock(&ctrl->scan_lock); + + /* Requeue if we have missed AENs */ + if (test_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) + nvme_queue_scan(ctrl); +#ifdef CONFIG_NVME_MULTIPATH + else if (ctrl->ana_log_buf) + /* Re-read the ANA log page to not miss updates */ + queue_work(nvme_wq, &ctrl->ana_work); +#endif } /* @@ -4864,7 +4881,8 @@ static void nvme_fw_act_work(struct work_struct *work) msleep(100); } - if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) + if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) || + !nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) return; nvme_start_queues(ctrl); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 3dbf926fd99f..2b0f15de7711 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3525,8 +3525,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); spin_unlock_irqrestore(&rport->lock, flags); - if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || - !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { + if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { dev_err(ctrl->ctrl.device, "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); goto fail_ctrl; diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index b3e322e4ade3..acf73a91e87e 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -3,6 +3,7 @@ * Copyright (c) 2011-2014, Intel Corporation. * Copyright (c) 2017-2021 Christoph Hellwig. */ +#include #include /* for force_successful_syscall_return */ #include #include @@ -95,10 +96,15 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer, struct request_queue *q = req->q; struct nvme_ns *ns = q->queuedata; struct block_device *bdev = ns ?
ns->disk->part0 : NULL; + bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk); + bool has_metadata = meta_buffer && meta_len; struct bio *bio = NULL; void *meta = NULL; int ret; + if (has_metadata && !supports_metadata) + return -EINVAL; + if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) { struct iov_iter iter; @@ -122,7 +128,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer, if (bdev) bio_set_dev(bio, bdev); - if (bdev && meta_buffer && meta_len) { + if (has_metadata) { meta = nvme_add_user_metadata(req, meta_buffer, meta_len, meta_seed); if (IS_ERR(meta)) { @@ -256,8 +262,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl, { if (ns && nsid != ns->head->ns_id) { dev_err(ctrl->device, - "%s: nsid (%u) in cmd does not match nsid (%u)" - "of namespace\n", + "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n", current->comm, nsid, ns->head->ns_id); return false; } diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d439c8f42c1f..ffda86ac2381 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -153,6 +153,7 @@ struct nvme_dev { /* host memory buffer support: */ u64 host_mem_size; u32 nr_host_mem_descs; + u32 host_mem_descs_size; dma_addr_t host_mem_descs_dma; struct nvme_host_mem_buf_desc *host_mem_descs; void **host_mem_desc_bufs; @@ -375,7 +376,7 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, * as it only leads to a small amount of wasted memory for the lifetime of * the I/O. */ -static int nvme_pci_npages_prp(void) +static __always_inline int nvme_pci_npages_prp(void) { unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); @@ -951,9 +952,10 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist) { + struct request *req; + spin_lock(&nvmeq->sq_lock); - while (!rq_list_empty(*rqlist)) { - struct request *req = rq_list_pop(rqlist); + while ((req = rq_list_pop(rqlist))) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_sq_copy_cmd(nvmeq, &iod->cmd); @@ -979,31 +981,25 @@ static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req) static void nvme_queue_rqs(struct request **rqlist) { - struct request *req, *next, *prev = NULL; + struct request *submit_list = NULL; struct request *requeue_list = NULL; + struct request **requeue_lastp = &requeue_list; + struct nvme_queue *nvmeq = NULL; + struct request *req; - rq_list_for_each_safe(rqlist, req, next) { - struct nvme_queue *nvmeq = req->mq_hctx->driver_data; + while ((req = rq_list_pop(rqlist))) { + if (nvmeq && nvmeq != req->mq_hctx->driver_data) + nvme_submit_cmds(nvmeq, &submit_list); + nvmeq = req->mq_hctx->driver_data; - if (!nvme_prep_rq_batch(nvmeq, req)) { - /* detach 'req' and add to remainder list */ - rq_list_move(rqlist, &requeue_list, req, prev); - - req = prev; - if (!req) - continue; - } - - if (!next || req->mq_hctx != next->mq_hctx) { - /* detach rest of list, and submit */ - req->rq_next = NULL; - nvme_submit_cmds(nvmeq, rqlist); - *rqlist = next; - prev = NULL; - } else - prev = req; + if (nvme_prep_rq_batch(nvmeq, req)) + rq_list_add(&submit_list, req); /* reverse order */ + else + rq_list_add_tail(&requeue_lastp, req); } + if (nvmeq) + nvme_submit_cmds(nvmeq, &submit_list); *rqlist = requeue_list; } @@ -1158,7 +1154,9 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) 
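The reworked nvme_queue_rqs() above pops requests off the incoming list one at a time, gathers consecutive entries that target the same queue, and flushes the accumulated batch whenever the queue changes or the input runs out. Below is a minimal userspace C sketch of that grouping pattern only; the struct and the queue id are made-up stand-ins for struct request and its hardware context, not the driver's real types.

#include <stdio.h>
#include <stddef.h>

struct req {
        int queue_id;                   /* stand-in for req->mq_hctx->driver_data */
        struct req *next;
};

/* Pop the head of a singly linked list, in the spirit of rq_list_pop(). */
static struct req *pop(struct req **list)
{
        struct req *r = *list;

        if (r)
                *list = r->next;
        return r;
}

static void submit_batch(int queue_id, struct req *batch)
{
        for (struct req *r = batch; r; r = r->next)
                printf("submit request to queue %d\n", queue_id);
}

int main(void)
{
        struct req reqs[] = {
                { .queue_id = 0 }, { .queue_id = 0 },
                { .queue_id = 1 }, { .queue_id = 1 },
        };
        struct req *list = NULL, *batch = NULL, *r;
        int cur = -1;
        int i;

        /* Build the input list; pushing to the head reverses the order. */
        for (i = 3; i >= 0; i--) {
                reqs[i].next = list;
                list = &reqs[i];
        }

        while ((r = pop(&list))) {
                if (cur != -1 && r->queue_id != cur) {
                        /* Queue changed: flush what has been collected so far. */
                        submit_batch(cur, batch);
                        batch = NULL;
                }
                cur = r->queue_id;
                r->next = batch;        /* push onto the current batch */
                batch = r;
        }
        if (batch)
                submit_batch(cur, batch);
        return 0;
}

As in the driver, each batch is built by pushing onto the head, so requests within a batch are submitted in reverse arrival order.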
WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); + spin_lock(&nvmeq->cq_poll_lock); nvme_poll_cq(nvmeq, NULL); + spin_unlock(&nvmeq->cq_poll_lock); enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); } @@ -1932,6 +1930,18 @@ static void nvme_map_cmb(struct nvme_dev *dev) if (offset > bar_size) return; + /* + * Controllers may support a CMB size larger than their BAR, for + * example, due to being behind a bridge. Reduce the CMB to the + * reported size of the BAR + */ + size = min(size, bar_size - offset); + + if (!IS_ALIGNED(size, memremap_compat_align()) || + !IS_ALIGNED(pci_resource_start(pdev, bar), + memremap_compat_align())) + return; + /* * Tell the controller about the host side address mapping the CMB, * and enable CMB decoding for the NVMe 1.4+ scheme: @@ -1942,17 +1952,10 @@ static void nvme_map_cmb(struct nvme_dev *dev) dev->bar + NVME_REG_CMBMSC); } - /* - * Controllers may support a CMB size larger than their BAR, - * for example, due to being behind a bridge. Reduce the CMB to - * the reported size of the BAR - */ - if (size > bar_size - offset) - size = bar_size - offset; - if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { dev_warn(dev->ctrl.device, "failed to register the CMB\n"); + hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC); return; } @@ -2007,10 +2010,10 @@ static void nvme_free_host_mem(struct nvme_dev *dev) kfree(dev->host_mem_desc_bufs); dev->host_mem_desc_bufs = NULL; - dma_free_coherent(dev->dev, - dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), + dma_free_coherent(dev->dev, dev->host_mem_descs_size, dev->host_mem_descs, dev->host_mem_descs_dma); dev->host_mem_descs = NULL; + dev->host_mem_descs_size = 0; dev->nr_host_mem_descs = 0; } @@ -2018,7 +2021,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, u32 chunk_size) { struct nvme_host_mem_buf_desc *descs; - u32 max_entries, len; + u32 max_entries, len, descs_size; dma_addr_t descs_dma; int i = 0; void **bufs; @@ -2031,8 +2034,9 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) max_entries = dev->ctrl.hmmaxd; - descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), - &descs_dma, GFP_KERNEL); + descs_size = max_entries * sizeof(*descs); + descs = dma_alloc_coherent(dev->dev, descs_size, &descs_dma, + GFP_KERNEL); if (!descs) goto out; @@ -2061,6 +2065,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, dev->host_mem_size = size; dev->host_mem_descs = descs; dev->host_mem_descs_dma = descs_dma; + dev->host_mem_descs_size = descs_size; dev->host_mem_desc_bufs = bufs; return 0; @@ -2075,8 +2080,7 @@ out_free_bufs: kfree(bufs); out_free_descs: - dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, - descs_dma); + dma_free_coherent(dev->dev, descs_size, descs, descs_dma); out: dev->host_mem_descs = NULL; return -ENOMEM; @@ -3109,7 +3113,9 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) * because of high power consumption (> 2 Watt) in s2idle * sleep. Only some boards with Intel CPU are affected. 
*/ - if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") || + if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") || + dmi_match(DMI_BOARD_NAME, "GMxPXxx") || + dmi_match(DMI_BOARD_NAME, "GXxMRXx") || dmi_match(DMI_BOARD_NAME, "PH4PG31") || dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") || dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71")) @@ -3583,12 +3589,16 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1dbe, 0x5216), /* Acer/INNOGRIT FA100/5216 NVMe SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x025e, 0xf1ac), /* SOLIDIGM P44 pro SSDPFKKW020X7 */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 29489c2c52fb..4e1b91c0416b 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -694,6 +694,40 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue, return 0; } +static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue, + struct nvme_tcp_term_pdu *pdu) +{ + u16 fes; + const char *msg; + u32 plen = le32_to_cpu(pdu->hdr.plen); + + static const char * const msg_table[] = { + [NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field", + [NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error", + [NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error", + [NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range", + [NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded", + [NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter", + }; + + if (plen < NVME_TCP_MIN_C2HTERM_PLEN || + plen > NVME_TCP_MAX_C2HTERM_PLEN) { + dev_err(queue->ctrl->ctrl.device, + "Received a malformed C2HTermReq PDU (plen = %u)\n", + plen); + return; + } + + fes = le16_to_cpu(pdu->fes); + if (fes && fes < ARRAY_SIZE(msg_table)) + msg = msg_table[fes]; + else + msg = "Unknown"; + + dev_err(queue->ctrl->ctrl.device, + "Received C2HTermReq (FES = %s)\n", msg); +} + static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, unsigned int *offset, size_t *len) { @@ -715,6 +749,15 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb, return 0; hdr = queue->pdu; + if (unlikely(hdr->type == nvme_tcp_c2h_term)) { + /* + * C2HTermReq never includes Header or Data digests. + * Skip the checks. 
+ */ + nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu); + return -EINVAL; + } + if (queue->hdr_digest) { ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); if (unlikely(ret)) @@ -1643,7 +1686,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue) cancel_work_sync(&queue->io_work); } -static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) +static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid) { struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; @@ -1657,6 +1700,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) mutex_unlock(&queue->queue_lock); } +static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid) +{ + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); + struct nvme_tcp_queue *queue = &ctrl->queues[qid]; + int timeout = 100; + + while (timeout > 0) { + if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) || + !sk_wmem_alloc_get(queue->sock->sk)) + return; + msleep(2); + timeout -= 2; + } + dev_warn(nctrl->device, + "qid %d: timeout draining sock wmem allocation expired\n", + qid); +} + +static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) +{ + nvme_tcp_stop_queue_nowait(nctrl, qid); + nvme_tcp_wait_queue(nctrl, qid); +} + + static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue) { write_lock_bh(&queue->sock->sk->sk_callback_lock); @@ -1723,7 +1791,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) int i; for (i = 1; i < ctrl->queue_count; i++) - nvme_tcp_stop_queue(ctrl, i); + nvme_tcp_stop_queue_nowait(ctrl, i); + for (i = 1; i < ctrl->queue_count; i++) + nvme_tcp_wait_queue(ctrl, i); } static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl, @@ -2475,6 +2545,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) { struct nvme_tcp_queue *queue = hctx->driver_data; struct sock *sk = queue->sock->sk; + int ret; if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) return 0; @@ -2482,9 +2553,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) set_bit(NVME_TCP_Q_POLLING, &queue->flags); if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) sk_busy_loop(sk, true); - nvme_tcp_try_recv(queue); + ret = nvme_tcp_try_recv(queue); clear_bit(NVME_TCP_Q_POLLING, &queue->flags); - return queue->nr_cqe; + return ret < 0 ? ret : queue->nr_cqe; } static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 8a02ed63b156..570c58d2b5a5 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -173,20 +173,6 @@ struct nvmet_fc_tgt_assoc { struct rcu_head rcu; }; - -static inline int -nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr) -{ - return (iodptr - iodptr->tgtport->iod); -} - -static inline int -nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr) -{ - return (fodptr - fodptr->queue->fod); -} - - /* * Association and Connection IDs: * @@ -1044,33 +1030,24 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) struct nvmet_fc_hostport *newhost, *match = NULL; unsigned long flags; + /* + * Caller holds a reference on tgtport. 
+ */ + /* if LLDD not implemented, leave as NULL */ if (!hosthandle) return NULL; - /* - * take reference for what will be the newly allocated hostport if - * we end up using a new allocation - */ - if (!nvmet_fc_tgtport_get(tgtport)) - return ERR_PTR(-EINVAL); - spin_lock_irqsave(&tgtport->lock, flags); match = nvmet_fc_match_hostport(tgtport, hosthandle); spin_unlock_irqrestore(&tgtport->lock, flags); - if (match) { - /* no new allocation - release reference */ - nvmet_fc_tgtport_put(tgtport); + if (match) return match; - } newhost = kzalloc(sizeof(*newhost), GFP_KERNEL); - if (!newhost) { - /* no new allocation - release reference */ - nvmet_fc_tgtport_put(tgtport); + if (!newhost) return ERR_PTR(-ENOMEM); - } spin_lock_irqsave(&tgtport->lock, flags); match = nvmet_fc_match_hostport(tgtport, hosthandle); @@ -1079,6 +1056,7 @@ nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle) kfree(newhost); newhost = match; } else { + nvmet_fc_tgtport_get(tgtport); newhost->tgtport = tgtport; newhost->hosthandle = hosthandle; INIT_LIST_HEAD(&newhost->host_list); @@ -1113,7 +1091,8 @@ static void nvmet_fc_schedule_delete_assoc(struct nvmet_fc_tgt_assoc *assoc) { nvmet_fc_tgtport_get(assoc->tgtport); - queue_work(nvmet_wq, &assoc->del_work); + if (!queue_work(nvmet_wq, &assoc->del_work)) + nvmet_fc_tgtport_put(assoc->tgtport); } static struct nvmet_fc_tgt_assoc * diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c index f5b8442b653d..787dfb3859a0 100644 --- a/drivers/nvme/target/fcloop.c +++ b/drivers/nvme/target/fcloop.c @@ -478,7 +478,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport, if (targetport) { tport = targetport->private; spin_lock(&tport->lock); - list_add_tail(&tport->ls_list, &tls_req->ls_list); + list_add_tail(&tls_req->ls_list, &tport->ls_list); spin_unlock(&tport->lock); queue_work(nvmet_wq, &tport->ls_work); } diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index c2d6cea0236b..ada58a4d0d0c 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id) */ id->nsfeat |= 1 << 4; /* NPWG = Namespace Preferred Write Granularity. 0's based */ - id->npwg = lpp0b; + id->npwg = to0based(bdev_io_min(bdev) / bdev_logical_block_size(bdev)); /* NPWA = Namespace Preferred Write Alignment. 0's based */ id->npwa = id->npwg; /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */ diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index a6d55ebb8238..298c46834a53 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -997,6 +997,27 @@ out_err: nvmet_req_complete(&cmd->req, status); } +static bool nvmet_rdma_recv_not_live(struct nvmet_rdma_queue *queue, + struct nvmet_rdma_rsp *rsp) +{ + unsigned long flags; + bool ret = true; + + spin_lock_irqsave(&queue->state_lock, flags); + /* + * recheck queue state is not live to prevent a race condition + * with RDMA_CM_EVENT_ESTABLISHED handler. 
+ */ + if (queue->state == NVMET_RDMA_Q_LIVE) + ret = false; + else if (queue->state == NVMET_RDMA_Q_CONNECTING) + list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); + else + nvmet_rdma_put_rsp(rsp); + spin_unlock_irqrestore(&queue->state_lock, flags); + return ret; +} + static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct nvmet_rdma_cmd *cmd = @@ -1038,17 +1059,9 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) rsp->req.port = queue->port; rsp->n_rdma = 0; - if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { - unsigned long flags; - - spin_lock_irqsave(&queue->state_lock, flags); - if (queue->state == NVMET_RDMA_Q_CONNECTING) - list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); - else - nvmet_rdma_put_rsp(rsp); - spin_unlock_irqrestore(&queue->state_lock, flags); + if (unlikely(queue->state != NVMET_RDMA_Q_LIVE) && + nvmet_rdma_recv_not_live(queue, rsp)) return; - } nvmet_rdma_handle_command(queue, rsp); } diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 81574500a57c..eee052dbf80c 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -520,10 +520,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req) struct nvmet_tcp_cmd *cmd = container_of(req, struct nvmet_tcp_cmd, req); struct nvmet_tcp_queue *queue = cmd->queue; + enum nvmet_tcp_recv_state queue_state; + struct nvmet_tcp_cmd *queue_cmd; struct nvme_sgl_desc *sgl; u32 len; - if (unlikely(cmd == queue->cmd)) { + /* Pairs with store_release in nvmet_prepare_receive_pdu() */ + queue_state = smp_load_acquire(&queue->rcv_state); + queue_cmd = READ_ONCE(queue->cmd); + + if (unlikely(cmd == queue_cmd)) { sgl = &cmd->req.cmd->common.dptr.sgl; len = le32_to_cpu(sgl->length); @@ -532,7 +538,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req) * Avoid using helpers, this might happen before * nvmet_req_init is completed. 
*/ - if (queue->rcv_state == NVMET_TCP_RECV_PDU && + if (queue_state == NVMET_TCP_RECV_PDU && len && len <= cmd->req.port->inline_data_size && nvme_is_write(cmd->req.cmd)) return; @@ -784,8 +790,9 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue) { queue->offset = 0; queue->left = sizeof(struct nvme_tcp_hdr); - queue->cmd = NULL; - queue->rcv_state = NVMET_TCP_RECV_PDU; + WRITE_ONCE(queue->cmd, NULL); + /* Ensure rcv_state is visible only after queue->cmd is set */ + smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU); } static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue) @@ -1410,6 +1417,9 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue) { struct socket *sock = queue->sock; + if (!queue->state_change) + return; + write_lock_bh(&sock->sk->sk_callback_lock); sock->sk->sk_data_ready = queue->data_ready; sock->sk->sk_state_change = queue->state_change; diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 2325f2de9af0..c107b23e6abf 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -1532,6 +1532,8 @@ static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, si return -EINVAL; if (cell->bit_offset || cell->nbits) { + if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes) + return -EINVAL; buf = nvmem_cell_prepare_write_buffer(cell, buf, len); if (IS_ERR(buf)) return PTR_ERR(buf); diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c index 8499892044b7..729d9d879fd5 100644 --- a/drivers/nvmem/qcom-spmi-sdam.c +++ b/drivers/nvmem/qcom-spmi-sdam.c @@ -143,6 +143,7 @@ static int sdam_probe(struct platform_device *pdev) sdam->sdam_config.id = NVMEM_DEVID_AUTO; sdam->sdam_config.owner = THIS_MODULE; sdam->sdam_config.stride = 1; + sdam->sdam_config.size = sdam->size; sdam->sdam_config.word_size = 1; sdam->sdam_config.reg_read = sdam_read; sdam->sdam_config.reg_write = sdam_write; diff --git a/drivers/of/address.c b/drivers/of/address.c index 67763e5b8c0e..e93b7b527f61 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -50,7 +50,7 @@ struct of_bus { u64 (*map)(__be32 *addr, const __be32 *range, int na, int ns, int pna); int (*translate)(__be32 *addr, u64 offset, int na); - bool has_flags; + int flag_cells; unsigned int (*get_flags)(const __be32 *addr); }; @@ -95,11 +95,43 @@ static int of_bus_default_translate(__be32 *addr, u64 offset, int na) return 0; } +static unsigned int of_bus_default_flags_get_flags(const __be32 *addr) +{ + return of_read_number(addr, 1); +} + static unsigned int of_bus_default_get_flags(const __be32 *addr) { return IORESOURCE_MEM; } +static u64 of_bus_default_flags_map(__be32 *addr, const __be32 *range, int na, + int ns, int pna) +{ + u64 cp, s, da; + + /* Check that flags match */ + if (*addr != *range) + return OF_BAD_ADDR; + + /* Read address values, skipping high cell */ + cp = of_read_number(range + 1, na - 1); + s = of_read_number(range + na + pna, ns); + da = of_read_number(addr + 1, na - 1); + + pr_debug("default flags map, cp=%llx, s=%llx, da=%llx\n", cp, s, da); + + if (da < cp || da >= (cp + s)) + return OF_BAD_ADDR; + return da - cp; +} + +static int of_bus_default_flags_translate(__be32 *addr, u64 offset, int na) +{ + /* Keep "flags" part (high cell) in translated address */ + return of_bus_default_translate(addr + 1, offset, na - 1); +} + #ifdef CONFIG_PCI static unsigned int of_bus_pci_get_flags(const __be32 *addr) { @@ -189,10 +221,6 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, 
int ns, return da - cp; } -static int of_bus_pci_translate(__be32 *addr, u64 offset, int na) -{ - return of_bus_default_translate(addr + 1, offset, na - 1); -} #endif /* CONFIG_PCI */ int of_pci_address_to_resource(struct device_node *dev, int bar, @@ -302,11 +330,6 @@ static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns, return da - cp; } -static int of_bus_isa_translate(__be32 *addr, u64 offset, int na) -{ - return of_bus_default_translate(addr + 1, offset, na - 1); -} - static unsigned int of_bus_isa_get_flags(const __be32 *addr) { unsigned int flags = 0; @@ -319,6 +342,11 @@ static unsigned int of_bus_isa_get_flags(const __be32 *addr) return flags; } +static int of_bus_default_flags_match(struct device_node *np) +{ + return of_bus_n_addr_cells(np) == 3; +} + /* * Array of bus specific translators */ @@ -332,8 +360,8 @@ static struct of_bus of_busses[] = { .match = of_bus_pci_match, .count_cells = of_bus_pci_count_cells, .map = of_bus_pci_map, - .translate = of_bus_pci_translate, - .has_flags = true, + .translate = of_bus_default_flags_translate, + .flag_cells = 1, .get_flags = of_bus_pci_get_flags, }, #endif /* CONFIG_PCI */ @@ -344,10 +372,21 @@ static struct of_bus of_busses[] = { .match = of_bus_isa_match, .count_cells = of_bus_isa_count_cells, .map = of_bus_isa_map, - .translate = of_bus_isa_translate, - .has_flags = true, + .translate = of_bus_default_flags_translate, + .flag_cells = 1, .get_flags = of_bus_isa_get_flags, }, + /* Default with flags cell */ + { + .name = "default-flags", + .addresses = "reg", + .match = of_bus_default_flags_match, + .count_cells = of_bus_default_count_cells, + .map = of_bus_default_flags_map, + .translate = of_bus_default_flags_translate, + .flag_cells = 1, + .get_flags = of_bus_default_flags_get_flags, + }, /* Default */ { .name = "default", @@ -427,7 +466,8 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus, } if (ranges == NULL || rlen == 0) { offset = of_read_number(addr, na); - memset(addr, 0, pna * 4); + /* set address to zero, pass flags through */ + memset(addr + pbus->flag_cells, 0, (pna - pbus->flag_cells) * 4); pr_debug("empty ranges; 1:1 translation\n"); goto finish; } @@ -595,7 +635,7 @@ struct device_node *__of_get_dma_parent(const struct device_node *np) if (ret < 0) return of_get_parent(np); - return of_node_get(args.np); + return args.np; } #endif @@ -714,7 +754,7 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, int na = parser->na; int ns = parser->ns; int np = parser->pna + na + ns; - int busflag_na = 0; + int busflag_na = parser->bus->flag_cells; if (!range) return NULL; @@ -724,10 +764,6 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser, range->flags = parser->bus->get_flags(parser->range); - /* A extra cell for resource flags */ - if (parser->bus->has_flags) - busflag_na = 1; - range->bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na); if (parser->dma) diff --git a/drivers/of/base.c b/drivers/of/base.c index f849bbb9ef8c..90c934bb91da 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -974,10 +974,10 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt /* The path could begin with an alias */ if (*path != '/') { int len; - const char *p = separator; + const char *p = strchrnul(path, '/'); - if (!p) - p = strchrnul(path, '/'); + if (separator && separator < p) + p = separator; len = p - path; /* of_aliases must not be NULL */ @@ -1597,8 +1597,10 @@ int 
of_parse_phandle_with_args_map(const struct device_node *np, map_len--; /* Check if not found */ - if (!new) + if (!new) { + ret = -EINVAL; goto put; + } if (!of_device_is_available(new)) match = 0; @@ -1608,17 +1610,20 @@ int of_parse_phandle_with_args_map(const struct device_node *np, goto put; /* Check for malformed properties */ - if (WARN_ON(new_size > MAX_PHANDLE_ARGS)) - goto put; - if (map_len < new_size) + if (WARN_ON(new_size > MAX_PHANDLE_ARGS) || + map_len < new_size) { + ret = -EINVAL; goto put; + } /* Move forward by new node's #-cells amount */ map += new_size; map_len -= new_size; } - if (!match) + if (!match) { + ret = -ENOENT; goto put; + } /* Get the -map-pass-thru property (optional) */ pass = of_get_property(cur, pass_name, NULL); @@ -1630,7 +1635,6 @@ int of_parse_phandle_with_args_map(const struct device_node *np, * specifier into the out_args structure, keeping the * bits specified in -map-pass-thru. */ - match_array = map - new_size; for (i = 0; i < new_size; i++) { __be32 val = *(map - new_size + i); @@ -1639,6 +1643,7 @@ int of_parse_phandle_with_args_map(const struct device_node *np, val |= cpu_to_be32(out_args->args[i]) & pass[i]; } + initial_match_array[i] = val; out_args->args[i] = be32_to_cpu(val); } out_args->args_count = list_size = new_size; diff --git a/drivers/of/device.c b/drivers/of/device.c index ce225d2590b5..91d92bfe5735 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -264,14 +264,15 @@ static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T', of_node_get_device_type(dev->of_node)); tsize = csize; + if (csize >= len) + csize = len > 0 ? len - 1 : 0; len -= csize; - if (str) - str += csize; + str += csize; of_property_for_each_string(dev->of_node, "compatible", p, compat) { csize = strlen(compat) + 1; tsize += csize; - if (csize > len) + if (csize >= len) continue; csize = snprintf(str, len, "C%s", compat); diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 8c402690d109..24e2273b9df2 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -16,6 +16,7 @@ #define pr_fmt(fmt) "OF: " fmt +#include #include #include #include @@ -38,11 +39,15 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index) { struct of_phandle_args oirq; + unsigned int ret; if (of_irq_parse_one(dev, index, &oirq)) return 0; - return irq_create_of_mapping(&oirq); + ret = irq_create_of_mapping(&oirq); + of_node_put(oirq.np); + + return ret; } EXPORT_SYMBOL_GPL(irq_of_parse_and_map); @@ -111,6 +116,7 @@ const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_ph else np = of_find_node_by_phandle(be32_to_cpup(imap)); imap++; + len--; /* Check if not found */ if (!np) { @@ -164,6 +170,8 @@ const __be32 *of_irq_parse_imap_parent(const __be32 *imap, int len, struct of_ph * the specifier for each map, and then returns the translated map. * * Return: 0 on success and a negative number on error + * + * Note: refcount of node @out_irq->np is increased by 1 on success. */ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) { @@ -309,6 +317,12 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) addrsize = (imap - match_array) - intsize; if (ipar == newpar) { + /* + * We got @ipar's refcount, but the refcount was + * gotten again by of_irq_parse_imap_parent() via its + * alias @newpar. 
+ */ + of_node_put(ipar); pr_debug("%pOF interrupt-map entry to self\n", ipar); return 0; } @@ -338,10 +352,12 @@ EXPORT_SYMBOL_GPL(of_irq_parse_raw); * This function resolves an interrupt for a node by walking the interrupt tree, * finding which interrupt controller node it is attached to, and returning the * interrupt specifier that can be used to retrieve a Linux IRQ number. + * + * Note: refcount of node @out_irq->np is increased by 1 on success. */ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq) { - struct device_node *p; + struct device_node __free(device_node) *p = NULL; const __be32 *addr; u32 intsize; int i, res, addr_len; @@ -354,6 +370,7 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar return of_irq_parse_oldworld(device, index, out_irq); /* Get the reg property (if any) */ + addr_len = 0; addr = of_get_property(device, "reg", &addr_len); /* Prevent out-of-bounds read in case of longer interrupt parent address size */ @@ -365,41 +382,33 @@ int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_ar /* Try the new-style interrupts-extended first */ res = of_parse_phandle_with_args(device, "interrupts-extended", "#interrupt-cells", index, out_irq); - if (!res) - return of_irq_parse_raw(addr_buf, out_irq); + if (!res) { + p = out_irq->np; + } else { + /* Look for the interrupt parent. */ + p = of_irq_find_parent(device); + /* Get size of interrupt specifier */ + if (!p || of_property_read_u32(p, "#interrupt-cells", &intsize)) + return -EINVAL; - /* Look for the interrupt parent. */ - p = of_irq_find_parent(device); - if (p == NULL) - return -EINVAL; + pr_debug(" parent=%pOF, intsize=%d\n", p, intsize); - /* Get size of interrupt specifier */ - if (of_property_read_u32(p, "#interrupt-cells", &intsize)) { - res = -EINVAL; - goto out; + /* Copy intspec into irq structure */ + out_irq->np = p; + out_irq->args_count = intsize; + for (i = 0; i < intsize; i++) { + res = of_property_read_u32_index(device, "interrupts", + (index * intsize) + i, + out_irq->args + i); + if (res) + return res; + } + + pr_debug(" intspec=%d\n", *out_irq->args); } - pr_debug(" parent=%pOF, intsize=%d\n", p, intsize); - - /* Copy intspec into irq structure */ - out_irq->np = p; - out_irq->args_count = intsize; - for (i = 0; i < intsize; i++) { - res = of_property_read_u32_index(device, "interrupts", - (index * intsize) + i, - out_irq->args + i); - if (res) - goto out; - } - - pr_debug(" intspec=%d\n", *out_irq->args); - - /* Check if there are any interrupt-map translations to process */ - res = of_irq_parse_raw(addr_buf, out_irq); - out: - of_node_put(p); - return res; + return of_irq_parse_raw(addr_buf, out_irq); } EXPORT_SYMBOL_GPL(of_irq_parse_one); @@ -498,8 +507,10 @@ int of_irq_count(struct device_node *dev) struct of_phandle_args irq; int nr = 0; - while (of_irq_parse_one(dev, nr, &irq) == 0) + while (of_irq_parse_one(dev, nr, &irq) == 0) { + of_node_put(irq.np); nr++; + } return nr; } @@ -616,6 +627,8 @@ void __init of_irq_init(const struct of_device_id *matches) __func__, desc->dev, desc->dev, desc->interrupt_parent); of_node_clear_flag(desc->dev, OF_POPULATED); + of_node_put(desc->interrupt_parent); + of_node_put(desc->dev); kfree(desc); continue; } @@ -646,6 +659,7 @@ void __init of_irq_init(const struct of_device_id *matches) err: list_for_each_entry_safe(desc, temp_desc, &intc_desc_list, list) { list_del(&desc->list); + of_node_put(desc->interrupt_parent); of_node_put(desc->dev); kfree(desc); } diff 
--git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index f90975e00446..5a5d24eeb5f3 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -50,7 +50,8 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, memblock_phys_free(base, size); } - kmemleak_ignore_phys(base); + if (!err) + kmemleak_ignore_phys(base); return err; } diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c index b278ab4338ce..d5c1b2a126a5 100644 --- a/drivers/of/resolver.c +++ b/drivers/of/resolver.c @@ -262,25 +262,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups, */ int of_resolve_phandles(struct device_node *overlay) { - struct device_node *child, *local_fixups, *refnode; - struct device_node *tree_symbols, *overlay_fixups; + struct device_node *child, *refnode; + struct device_node *overlay_fixups; + struct device_node __free(device_node) *local_fixups = NULL; struct property *prop; const char *refpath; phandle phandle, phandle_delta; int err; - tree_symbols = NULL; - if (!overlay) { pr_err("null overlay\n"); - err = -EINVAL; - goto out; + return -EINVAL; } if (!of_node_check_flag(overlay, OF_DETACHED)) { pr_err("overlay not detached\n"); - err = -EINVAL; - goto out; + return -EINVAL; } phandle_delta = live_tree_max_phandle() + 1; @@ -292,7 +289,7 @@ int of_resolve_phandles(struct device_node *overlay) err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta); if (err) - goto out; + return err; overlay_fixups = NULL; @@ -301,16 +298,13 @@ int of_resolve_phandles(struct device_node *overlay) overlay_fixups = child; } - if (!overlay_fixups) { - err = 0; - goto out; - } + if (!overlay_fixups) + return 0; - tree_symbols = of_find_node_by_path("/__symbols__"); + struct device_node __free(device_node) *tree_symbols = of_find_node_by_path("/__symbols__"); if (!tree_symbols) { pr_err("no symbols in root of device tree.\n"); - err = -EINVAL; - goto out; + return -EINVAL; } for_each_property_of_node(overlay_fixups, prop) { @@ -324,14 +318,12 @@ int of_resolve_phandles(struct device_node *overlay) if (err) { pr_err("node label '%s' not found in live devicetree symbols table\n", prop->name); - goto out; + return err; } refnode = of_find_node_by_path(refpath); - if (!refnode) { - err = -ENOENT; - goto out; - } + if (!refnode) + return -ENOENT; phandle = refnode->phandle; of_node_put(refnode); @@ -341,11 +333,8 @@ int of_resolve_phandles(struct device_node *overlay) break; } -out: if (err) pr_err("overlay phandle fixup failed: %d\n", err); - of_node_put(tree_symbols); - return err; } EXPORT_SYMBOL_GPL(of_resolve_phandles); diff --git a/drivers/of/unittest-data/tests-address.dtsi b/drivers/of/unittest-data/tests-address.dtsi index 6604a52bf6cb..bc0029cbf8ea 100644 --- a/drivers/of/unittest-data/tests-address.dtsi +++ b/drivers/of/unittest-data/tests-address.dtsi @@ -14,7 +14,7 @@ #size-cells = <1>; /* ranges here is to make sure we don't use it for * dma-ranges translation */ - ranges = <0x70000000 0x70000000 0x40000000>, + ranges = <0x70000000 0x70000000 0x50000000>, <0x00000000 0xd0000000 0x20000000>; dma-ranges = <0x0 0x20000000 0x40000000>; @@ -43,6 +43,13 @@ <0x42000000 0x0 0xc0000000 0x20000000 0x0 0x10000000>; }; + bus@a0000000 { + #address-cells = <3>; + #size-cells = <2>; + ranges = <0xf00baa 0x0 0x0 0xa0000000 0x0 0x100000>, + <0xf00bee 0x1 0x0 0xb0000000 0x0 0x200000>; + }; + }; }; }; diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index ce1386074e66..598e0891533f 100644 --- 
a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -1019,6 +1019,113 @@ static void __init of_unittest_pci_dma_ranges(void) of_node_put(np); } +static void __init of_unittest_bus_ranges(void) +{ + struct device_node *np; + struct of_range range; + struct of_range_parser parser; + int i = 0; + + np = of_find_node_by_path("/testcase-data/address-tests"); + if (!np) { + pr_err("missing testcase data\n"); + return; + } + + if (of_range_parser_init(&parser, np)) { + pr_err("missing ranges property\n"); + return; + } + + /* + * Get the "ranges" from the device tree + */ + for_each_of_range(&parser, &range) { + unittest(range.flags == IORESOURCE_MEM, + "for_each_of_range wrong flags on node %pOF flags=%x (expected %x)\n", + np, range.flags, IORESOURCE_MEM); + if (!i) { + unittest(range.size == 0x50000000, + "for_each_of_range wrong size on node %pOF size=%llx\n", + np, range.size); + unittest(range.cpu_addr == 0x70000000, + "for_each_of_range wrong CPU addr (%llx) on node %pOF", + range.cpu_addr, np); + unittest(range.bus_addr == 0x70000000, + "for_each_of_range wrong bus addr (%llx) on node %pOF", + range.pci_addr, np); + } else { + unittest(range.size == 0x20000000, + "for_each_of_range wrong size on node %pOF size=%llx\n", + np, range.size); + unittest(range.cpu_addr == 0xd0000000, + "for_each_of_range wrong CPU addr (%llx) on node %pOF", + range.cpu_addr, np); + unittest(range.bus_addr == 0x00000000, + "for_each_of_range wrong bus addr (%llx) on node %pOF", + range.pci_addr, np); + } + i++; + } + + of_node_put(np); +} + +static void __init of_unittest_bus_3cell_ranges(void) +{ + struct device_node *np; + struct of_range range; + struct of_range_parser parser; + int i = 0; + + np = of_find_node_by_path("/testcase-data/address-tests/bus@a0000000"); + if (!np) { + pr_err("missing testcase data\n"); + return; + } + + if (of_range_parser_init(&parser, np)) { + pr_err("missing ranges property\n"); + return; + } + + /* + * Get the "ranges" from the device tree + */ + for_each_of_range(&parser, &range) { + if (!i) { + unittest(range.flags == 0xf00baa, + "for_each_of_range wrong flags on node %pOF flags=%x\n", + np, range.flags); + unittest(range.size == 0x100000, + "for_each_of_range wrong size on node %pOF size=%llx\n", + np, range.size); + unittest(range.cpu_addr == 0xa0000000, + "for_each_of_range wrong CPU addr (%llx) on node %pOF", + range.cpu_addr, np); + unittest(range.bus_addr == 0x0, + "for_each_of_range wrong bus addr (%llx) on node %pOF", + range.pci_addr, np); + } else { + unittest(range.flags == 0xf00bee, + "for_each_of_range wrong flags on node %pOF flags=%x\n", + np, range.flags); + unittest(range.size == 0x200000, + "for_each_of_range wrong size on node %pOF size=%llx\n", + np, range.size); + unittest(range.cpu_addr == 0xb0000000, + "for_each_of_range wrong CPU addr (%llx) on node %pOF", + range.cpu_addr, np); + unittest(range.bus_addr == 0x100000000, + "for_each_of_range wrong bus addr (%llx) on node %pOF", + range.pci_addr, np); + } + i++; + } + + of_node_put(np); +} + static void __init of_unittest_parse_interrupts(void) { struct device_node *np; @@ -3521,6 +3628,8 @@ static int __init of_unittest(void) of_unittest_dma_get_max_cpu_address(); of_unittest_parse_dma_ranges(); of_unittest_pci_dma_ranges(); + of_unittest_bus_ranges(); + of_unittest_bus_3cell_ranges(); of_unittest_match_node(); of_unittest_platform_populate(); of_unittest_overlay(); diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 71d3e3ba909a..4211cfb27350 100644 --- a/drivers/opp/core.c +++ 
b/drivers/opp/core.c @@ -104,11 +104,30 @@ struct opp_table *_find_opp_table(struct device *dev) * representation in the OPP table and manage the clock configuration themselves * in an platform specific way. */ -static bool assert_single_clk(struct opp_table *opp_table) +static bool assert_single_clk(struct opp_table *opp_table, + unsigned int __always_unused index) { return !WARN_ON(opp_table->clk_count > 1); } +/* + * Returns true if clock table is large enough to contain the clock index. + */ +static bool assert_clk_index(struct opp_table *opp_table, + unsigned int index) +{ + return opp_table->clk_count > index; +} + +/* + * Returns true if bandwidth table is large enough to contain the bandwidth index. + */ +static bool assert_bandwidth_index(struct opp_table *opp_table, + unsigned int index) +{ + return opp_table->path_count > index; +} + /** * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp * @opp: opp for which voltage has to be returned for @@ -180,25 +199,24 @@ unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp) EXPORT_SYMBOL_GPL(dev_pm_opp_get_power); /** - * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp - * @opp: opp for which frequency has to be returned for + * dev_pm_opp_get_freq_indexed() - Gets the frequency corresponding to an + * available opp with specified index + * @opp: opp for which frequency has to be returned for + * @index: index of the frequency within the required opp * - * Return: frequency in hertz corresponding to the opp, else - * return 0 + * Return: frequency in hertz corresponding to the opp with specified index, + * else return 0 */ -unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) +unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index) { - if (IS_ERR_OR_NULL(opp)) { + if (IS_ERR_OR_NULL(opp) || index >= opp->opp_table->clk_count) { pr_err("%s: Invalid parameters\n", __func__); return 0; } - if (!assert_single_clk(opp->opp_table)) - return 0; - - return opp->rates[0]; + return opp->rates[index]; } -EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); +EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq_indexed); /** * dev_pm_opp_get_level() - Gets the level corresponding to an available opp @@ -497,12 +515,12 @@ static struct dev_pm_opp *_opp_table_find_key(struct opp_table *opp_table, unsigned long (*read)(struct dev_pm_opp *opp, int index), bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, unsigned long opp_key, unsigned long key), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); /* Assert that the requirement is met */ - if (assert && !assert(opp_table)) + if (assert && !assert(opp_table, index)) return ERR_PTR(-EINVAL); mutex_lock(&opp_table->lock); @@ -530,7 +548,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), bool (*compare)(struct dev_pm_opp **opp, struct dev_pm_opp *temp_opp, unsigned long opp_key, unsigned long key), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { struct opp_table *opp_table; struct dev_pm_opp *opp; @@ -553,7 +571,7 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available, static struct dev_pm_opp *_find_key_exact(struct device *dev, unsigned long key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool 
(*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { /* * The value of key will be updated here, but will be ignored as the @@ -566,7 +584,7 @@ static struct dev_pm_opp *_find_key_exact(struct device *dev, static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { return _opp_table_find_key(opp_table, key, index, available, read, _compare_ceil, assert); @@ -575,7 +593,7 @@ static struct dev_pm_opp *_opp_table_find_key_ceil(struct opp_table *opp_table, static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { return _find_key(dev, key, index, available, read, _compare_ceil, assert); @@ -584,7 +602,7 @@ static struct dev_pm_opp *_find_key_ceil(struct device *dev, unsigned long *key, static struct dev_pm_opp *_find_key_floor(struct device *dev, unsigned long *key, int index, bool available, unsigned long (*read)(struct dev_pm_opp *opp, int index), - bool (*assert)(struct opp_table *opp_table)) + bool (*assert)(struct opp_table *opp_table, unsigned int index)) { return _find_key(dev, key, index, available, read, _compare_floor, assert); @@ -621,6 +639,35 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); +/** + * dev_pm_opp_find_freq_exact_indexed() - Search for an exact freq for the + * clock corresponding to the index + * @dev: Device for which we do this operation + * @freq: frequency to search for + * @index: Clock index + * @available: true/false - match for available opp + * + * Search for the matching exact OPP for the clock corresponding to the + * specified index from a starting freq for a device. + * + * Return: matching *opp , else returns ERR_PTR in case of error and should be + * handled using IS_ERR. Error return values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp * +dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq, + u32 index, bool available) +{ + return _find_key_exact(dev, freq, index, available, _read_freq, + assert_clk_index); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact_indexed); + static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, unsigned long *freq) { @@ -653,6 +700,35 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); +/** + * dev_pm_opp_find_freq_ceil_indexed() - Search for a rounded ceil freq for the + * clock corresponding to the index + * @dev: Device for which we do this operation + * @freq: Start frequency + * @index: Clock index + * + * Search for the matching ceil *available* OPP for the clock corresponding to + * the specified index from a starting freq for a device. + * + * Return: matching *opp and refreshes *freq accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. 
Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp * +dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq, + u32 index) +{ + return _find_key_ceil(dev, freq, index, true, _read_freq, + assert_clk_index); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil_indexed); + /** * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq * @dev: device for which we do this operation @@ -678,6 +754,34 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, } EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); +/** + * dev_pm_opp_find_freq_floor_indexed() - Search for a rounded floor freq for the + * clock corresponding to the index + * @dev: Device for which we do this operation + * @freq: Start frequency + * @index: Clock index + * + * Search for the matching floor *available* OPP for the clock corresponding to + * the specified index from a starting freq for a device. + * + * Return: matching *opp and refreshes *freq accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. Error return + * values can be: + * EINVAL: for bad pointer + * ERANGE: no match found for search + * ENODEV: if device not found in list of registered devices + * + * The callers are required to call dev_pm_opp_put() for the returned OPP after + * use. + */ +struct dev_pm_opp * +dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq, + u32 index) +{ + return _find_key_floor(dev, freq, index, true, _read_freq, assert_clk_index); +} +EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor_indexed); + /** * dev_pm_opp_find_level_exact() - search for an exact level * @dev: device for which we do this operation @@ -752,7 +856,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, unsigned long temp = *bw; struct dev_pm_opp *opp; - opp = _find_key_ceil(dev, &temp, index, true, _read_bw, NULL); + opp = _find_key_ceil(dev, &temp, index, true, _read_bw, + assert_bandwidth_index); *bw = temp; return opp; } @@ -783,7 +888,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev, unsigned long temp = *bw; struct dev_pm_opp *opp; - opp = _find_key_floor(dev, &temp, index, true, _read_bw, NULL); + opp = _find_key_floor(dev, &temp, index, true, _read_bw, + assert_bandwidth_index); *bw = temp; return opp; } @@ -1588,7 +1694,7 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) if (IS_ERR(opp_table)) return; - if (!assert_single_clk(opp_table)) + if (!assert_single_clk(opp_table, 0)) goto put_table; mutex_lock(&opp_table->lock); @@ -1939,7 +2045,7 @@ int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long tol; int ret; - if (!assert_single_clk(opp_table)) + if (!assert_single_clk(opp_table, 0)) return -EINVAL; new_opp = _opp_allocate(opp_table); @@ -2795,7 +2901,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, return r; } - if (!assert_single_clk(opp_table)) { + if (!assert_single_clk(opp_table, 0)) { r = -EINVAL; goto put_table; } @@ -2871,7 +2977,7 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq, return r; } - if (!assert_single_clk(opp_table)) { + if (!assert_single_clk(opp_table, 0)) { r = -EINVAL; goto put_table; } diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 605d68673f92..c1b2d8927845 100644 --- 
a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -950,7 +950,7 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, ret = _of_opp_alloc_required_opps(opp_table, new_opp); if (ret) - goto free_opp; + goto put_node; if (!of_property_read_u32(np, "clock-latency-ns", &val)) new_opp->clock_latency_ns = val; @@ -1003,6 +1003,8 @@ static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table, free_required_opps: _of_opp_free_required_opps(opp_table, new_opp); +put_node: + of_node_put(np); free_opp: _opp_free(new_opp); diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 4605758d3214..2e921ddbe748 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -2612,6 +2612,7 @@ enum parport_pc_pci_cards { netmos_9815, netmos_9901, netmos_9865, + asix_ax99100, quatech_sppxp100, wch_ch382l, brainboxes_uc146, @@ -2678,6 +2679,7 @@ static struct parport_pc_pci { /* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } }, /* netmos_9901 */ { 1, { { 0, -1 }, } }, /* netmos_9865 */ { 1, { { 0, -1 }, } }, + /* asix_ax99100 */ { 1, { { 0, 1 }, } }, /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, /* wch_ch382l */ { 1, { { 2, -1 }, } }, /* brainboxes_uc146 */ { 1, { { 3, -1 }, } }, @@ -2770,6 +2772,9 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { 0xA000, 0x1000, 0, 0, netmos_9865 }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 0xA000, 0x2000, 0, 0, netmos_9865 }, + /* ASIX AX99100 PCIe to Multi I/O Controller */ + { PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100, + 0xA000, 0x2000, 0, 0, asix_ax99100 }, /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index 55c028af4bd9..c4f9ac5c00c1 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig @@ -184,6 +184,12 @@ config PCI_P2PDMA P2P DMA transactions must be between devices behind the same root port. + Enabling this option will reduce the entropy of x86 KASLR memory + regions. For example - on a 46 bit system, the entropy goes down + from 16 bits to 15 bits. The actual reduction in entropy depends + on the physical address bits, on processor features, kernel config + (5 level page table) and physical memory present on the system. + If unsure, say N. 
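The entropy note added to the PCI_P2PDMA help text above follows from simple arithmetic: KASLR entropy in bits is log2 of the number of base addresses a randomized region can be placed at, so halving the usable placement space costs exactly one bit, which is the 16 -> 15 bit drop quoted for the 46-bit example. The tiny C snippet below only illustrates that arithmetic; the slot counts are hypothetical and are not derived from the kernel's actual KASLR layout.

#include <stdio.h>
#include <math.h>

/* Entropy, in bits, of choosing one of N equally likely base addresses. */
static double entropy_bits(unsigned long long slots)
{
        return log2((double)slots);
}

int main(void)
{
        /* Hypothetical slot counts before and after reserving extra address space. */
        unsigned long long before = 1ULL << 16;
        unsigned long long after = 1ULL << 15;

        printf("before: %llu slots -> %.0f bits\n", before, entropy_bits(before));
        printf("after:  %llu slots -> %.0f bits\n", after, entropy_bits(after));
        return 0;
}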
config PCI_LABEL diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index b8b655d4047e..a87dab9abba2 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -354,8 +354,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, spin_unlock_irqrestore(&ep->lock, flags); offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | - CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | - CDNS_PCIE_MSG_NO_DATA; + CDNS_PCIE_NORMAL_MSG_CODE(msg_code); writel(0, ep->irq_cpu_addr + offset); } diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index 190786e47df9..2891e8c16a59 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -240,7 +240,7 @@ struct cdns_pcie_rp_ib_bar { #define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8) #define CDNS_PCIE_NORMAL_MSG_CODE(code) \ (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) -#define CDNS_PCIE_MSG_NO_DATA BIT(16) +#define CDNS_PCIE_MSG_DATA BIT(16) struct cdns_pcie; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index a3acf144a40d..4f20094faee5 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -1172,11 +1172,10 @@ static int imx6_pcie_probe(struct platform_device *pdev) if (IS_ERR(imx6_pcie->pcie_aux)) return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux), "pcie_aux clock source missing or invalid\n"); - fallthrough; - case IMX7D: if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR) imx6_pcie->controller_id = 1; - + fallthrough; + case IMX7D: imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, "pciephy"); if (IS_ERR(imx6_pcie->pciephy_reset)) { diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index ae5506293557..a4f25a98a531 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -465,6 +465,17 @@ static void __iomem *ks_pcie_other_map_bus(struct pci_bus *bus, struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); u32 reg; + /* + * Checking whether the link is up here is a last line of defense + * against platforms that forward errors on the system bus as + * SError upon PCI configuration transactions issued when the link + * is down. This check is racy by definition and does not stop + * the system from triggering an SError if the link goes down + * after this check is performed. 
+ */ + if (!dw_pcie_link_up(pci)) + return NULL; + reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | CFG_FUNC(PCI_FUNC(devfn)); if (!pci_is_root_bus(bus->parent)) @@ -1105,6 +1116,7 @@ static int ks_pcie_am654_set_mode(struct device *dev, static const struct ks_pcie_of_data ks_pcie_rc_of_data = { .host_ops = &ks_pcie_host_ops, + .mode = DW_PCIE_RC_TYPE, .version = DW_PCIE_VER_365A, }; diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 449ad709495d..3b3f079d0d2d 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -283,7 +283,7 @@ static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, u32 index; struct dw_pcie *pci = to_dw_pcie_from_ep(ep); - for (index = 0; index < pci->num_ob_windows; index++) { + for_each_set_bit(index, ep->ob_window_map, pci->num_ob_windows) { if (ep->outbound_addr[index] != addr) continue; *atu_index = index; diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 521acd632f1a..c89ad1f92a07 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -281,8 +281,8 @@ static int brcm_pcie_encode_ibar_size(u64 size) if (log2_in >= 12 && log2_in <= 15) /* Covers 4KB to 32KB (inclusive) */ return (log2_in - 12) + 0x1c; - else if (log2_in >= 16 && log2_in <= 35) - /* Covers 64KB to 32GB, (inclusive) */ + else if (log2_in >= 16 && log2_in <= 36) + /* Covers 64KB to 64GB, (inclusive) */ return log2_in - 15; /* Something is awry so disable */ return 0; @@ -383,10 +383,10 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie) static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen) { u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); - u32 lnkcap = readl(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); + u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); lnkcap = (lnkcap & ~PCI_EXP_LNKCAP_SLS) | gen; - writel(lnkcap, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCAP); + writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY); lnkctl2 = (lnkctl2 & ~0xf) | gen; writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2); @@ -1117,7 +1117,8 @@ static int brcm_pcie_add_bus(struct pci_bus *bus) ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies); if (ret) { - dev_info(dev, "No regulators for downstream device\n"); + dev_info(dev, "Did not get regulators, err=%d\n", ret); + pcie->sr = NULL; goto no_regulators; } @@ -1140,7 +1141,7 @@ static void brcm_pcie_remove_bus(struct pci_bus *bus) struct subdev_regulators *sr = pcie->sr; struct device *dev = &bus->dev; - if (!sr) + if (!sr || !bus->parent || !pci_is_root_bus(bus->parent)) return; if (regulator_bulk_disable(sr->num_supplies, sr->supplies)) @@ -1487,7 +1488,7 @@ static struct pci_ops brcm7425_pcie_ops = { static int brcm_pcie_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node, *msi_np; + struct device_node *np = pdev->dev.of_node; struct pci_host_bridge *bridge; const struct pcie_cfg_data *data; struct brcm_pcie *pcie; @@ -1562,9 +1563,14 @@ static int brcm_pcie_probe(struct platform_device *pdev) goto fail; } - msi_np = of_parse_phandle(pcie->np, "msi-parent", 0); - if (pci_msi_enabled() && msi_np == pcie->np) { - ret = brcm_pcie_enable_msi(pcie); + if (pci_msi_enabled()) { + struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0); + + if (msi_np == pcie->np) + ret = 
brcm_pcie_enable_msi(pcie); + + of_node_put(msi_np); + if (ret) { dev_err(pcie->dev, "probe of internal MSI failed"); goto fail; @@ -1613,3 +1619,4 @@ module_platform_driver(brcm_pcie_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Broadcom STB PCIe RC driver"); MODULE_AUTHOR("Broadcom"); +MODULE_SOFTDEP("pre: irq_bcm2712_mip"); diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c index f9682df1da61..209719fb6ddc 100644 --- a/drivers/pci/controller/pcie-rcar-ep.c +++ b/drivers/pci/controller/pcie-rcar-ep.c @@ -107,7 +107,7 @@ static int rcar_pcie_parse_outbound_ranges(struct rcar_pcie_endpoint *ep, } if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), - outbound_name)) { + res->name)) { dev_err(pcie->dev, "Cannot request memory region %s.\n", outbound_name); return -EIO; diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index ab9d62d14ae9..dfb347e7db0f 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -63,15 +63,25 @@ static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip, ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region)); } +static int rockchip_pcie_ep_ob_atu_num_bits(struct rockchip_pcie *rockchip, + u64 pci_addr, size_t size) +{ + int num_pass_bits = fls64(pci_addr ^ (pci_addr + size - 1)); + + return clamp(num_pass_bits, + ROCKCHIP_PCIE_AT_MIN_NUM_BITS, + ROCKCHIP_PCIE_AT_MAX_NUM_BITS); +} + static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn, u32 r, u64 cpu_addr, u64 pci_addr, size_t size) { - int num_pass_bits = fls64(size - 1); + int num_pass_bits; u32 addr0, addr1, desc0; - if (num_pass_bits < 8) - num_pass_bits = 8; + num_pass_bits = rockchip_pcie_ep_ob_atu_num_bits(rockchip, + pci_addr, size); addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | (lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 2370186d1530..c3c95f7be593 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -262,6 +262,10 @@ (PCIE_EP_PF_CONFIG_REGS_BASE + (((fn) << 12) & GENMASK(19, 12))) #define ROCKCHIP_PCIE_EP_VIRT_FUNC_BASE(fn) \ (PCIE_EP_PF_CONFIG_REGS_BASE + 0x10000 + (((fn) << 12) & GENMASK(19, 12))) + +#define ROCKCHIP_PCIE_AT_MIN_NUM_BITS 8 +#define ROCKCHIP_PCIE_AT_MAX_NUM_BITS 20 + #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ (PCIE_CORE_AXI_CONF_BASE + 0x0828 + (fn) * 0x0040 + (bar) * 0x0008) #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c index e4ab48041eb6..53cf701bb228 100644 --- a/drivers/pci/controller/pcie-xilinx-cpm.c +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -595,15 +595,17 @@ static int xilinx_cpm_pcie_probe(struct platform_device *pdev) return err; bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); - if (!bus) - return -ENODEV; + if (!bus) { + err = -ENODEV; + goto err_free_irq_domains; + } port->variant = of_device_get_match_data(dev); err = xilinx_cpm_pcie_parse_dt(port, bus->res); if (err) { dev_err(dev, "Parsing DT failed\n"); - goto err_parse_dt; + goto err_free_irq_domains; } xilinx_cpm_pcie_init_port(port); @@ -627,7 +629,7 @@ err_host_bridge: xilinx_cpm_free_interrupts(port); err_setup_irq: pci_ecam_free(port->cfg); -err_parse_dt: +err_free_irq_domains: 
xilinx_cpm_free_irq_domains(port); return err; } diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 5c35884c226e..771ff0f6971f 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -17,6 +17,8 @@ #include #include +#include + #include #define VMD_CFGBAR 0 @@ -110,7 +112,7 @@ struct vmd_irq_list { struct vmd_dev { struct pci_dev *dev; - spinlock_t cfg_lock; + raw_spinlock_t cfg_lock; void __iomem *cfgbar; int msix_count; @@ -387,7 +389,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, if (!addr) return -EFAULT; - spin_lock_irqsave(&vmd->cfg_lock, flags); + raw_spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: *value = readb(addr); @@ -402,7 +404,7 @@ static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, ret = -EINVAL; break; } - spin_unlock_irqrestore(&vmd->cfg_lock, flags); + raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } @@ -422,7 +424,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, if (!addr) return -EFAULT; - spin_lock_irqsave(&vmd->cfg_lock, flags); + raw_spin_lock_irqsave(&vmd->cfg_lock, flags); switch (len) { case 1: writeb(value, addr); @@ -440,7 +442,7 @@ static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, ret = -EINVAL; break; } - spin_unlock_irqrestore(&vmd->cfg_lock, flags); + raw_spin_unlock_irqrestore(&vmd->cfg_lock, flags); return ret; } @@ -870,6 +872,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) dev_set_msi_domain(&vmd->bus->dev, dev_get_msi_domain(&vmd->dev->dev)); + WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj, + "domain"), "Can't create symlink to domain\n"); + vmd_acpi_begin(); pci_scan_child_bus(vmd->bus); @@ -907,9 +912,6 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) pci_bus_add_devices(vmd->bus); vmd_acpi_end(); - - WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj, - "domain"), "Can't create symlink to domain\n"); return 0; } @@ -919,6 +921,24 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) struct vmd_dev *vmd; int err; + if (xen_domain()) { + /* + * Xen doesn't have knowledge about devices in the VMD bus + * because the config space of devices behind the VMD bridge is + * not known to Xen, and hence Xen cannot discover or configure + * them in any way. + * + * Bypass of MSI remapping won't work in that case as direct + * write by Linux to the MSI entries won't result in functional + * interrupts, as Xen is the entity that manages the host + * interrupt controller and must configure interrupts. However + * multiplexing of interrupts by the VMD bridge will work under + * Xen, so force the usage of that mode which must always be + * supported by VMD bridges. 
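A hedged sketch of the same idea in isolation: xen_domain() from <xen/xen.h> is true for PV and HVM guests alike; FOO_FEAT_CAN_BYPASS_MSI_REMAP below is a made-up stand-in for the driver's private feature flag.

#include <linux/bits.h>
#include <xen/xen.h>

#define FOO_FEAT_CAN_BYPASS_MSI_REMAP	BIT(0)	/* made-up stand-in */

static unsigned long foo_filter_features(unsigned long features)
{
	/*
	 * Under Xen the hypervisor owns the host interrupt controller,
	 * so writing MSI entries directly cannot produce working
	 * interrupts; keep only the multiplexed mode.
	 */
	if (xen_domain())
		features &= ~FOO_FEAT_CAN_BYPASS_MSI_REMAP;

	return features;
}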
+ */ + features &= ~VMD_FEAT_CAN_BYPASS_MSI_REMAP; + } + if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) return -ENOMEM; @@ -958,7 +978,7 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) if (features & VMD_FEAT_OFFSET_FIRST_VECTOR) vmd->first_vec = 1; - spin_lock_init(&vmd->cfg_lock); + raw_spin_lock_init(&vmd->cfg_lock); pci_set_drvdata(dev, vmd); err = vmd_enable_domain(vmd, features); if (err) @@ -985,8 +1005,8 @@ static void vmd_remove(struct pci_dev *dev) { struct vmd_dev *vmd = pci_get_drvdata(dev); - sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_stop_root_bus(vmd->bus); + sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); pci_remove_root_bus(vmd->bus); vmd_cleanup_srcu(vmd); vmd_detach_resources(vmd); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index 2e8a4de2abab..bcf4f2ca82b3 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -251,7 +251,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test) fail_back_rx: dma_release_channel(epf_test->dma_chan_rx); - epf_test->dma_chan_tx = NULL; + epf_test->dma_chan_rx = NULL; fail_back_tx: dma_cap_zero(mask); @@ -328,7 +328,6 @@ static void pci_epf_test_print_rate(const char *ops, u64 size, static int pci_epf_test_copy(struct pci_epf_test *epf_test) { int ret; - bool use_dma; void __iomem *src_addr; void __iomem *dst_addr; phys_addr_t src_phys_addr; @@ -373,16 +372,9 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test) } ktime_get_ts64(&start); - use_dma = !!(reg->flags & FLAG_USE_DMA); - if (use_dma) { - if (!epf_test->dma_supported) { - dev_err(dev, "Cannot transfer data using DMA\n"); - ret = -EINVAL; - goto err_map_addr; - } - - if (epf_test->dma_private) { - dev_err(dev, "Cannot transfer data using DMA\n"); + if (reg->flags & FLAG_USE_DMA) { + if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) { + dev_err(dev, "DMA controller doesn't support MEMCPY\n"); ret = -EINVAL; goto err_map_addr; } @@ -406,7 +398,8 @@ static int pci_epf_test_copy(struct pci_epf_test *epf_test) kfree(buf); } ktime_get_ts64(&end); - pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma); + pci_epf_test_print_rate("COPY", reg->size, &start, &end, + reg->flags & FLAG_USE_DMA); err_map_addr: pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr); @@ -430,7 +423,6 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test) void __iomem *src_addr; void *buf; u32 crc32; - bool use_dma; phys_addr_t phys_addr; phys_addr_t dst_phys_addr; struct timespec64 start, end; @@ -463,14 +455,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test) goto err_map_addr; } - use_dma = !!(reg->flags & FLAG_USE_DMA); - if (use_dma) { - if (!epf_test->dma_supported) { - dev_err(dev, "Cannot transfer data using DMA\n"); - ret = -EINVAL; - goto err_dma_map; - } - + if (reg->flags & FLAG_USE_DMA) { dst_phys_addr = dma_map_single(dma_dev, buf, reg->size, DMA_FROM_DEVICE); if (dma_mapping_error(dma_dev, dst_phys_addr)) { @@ -495,7 +480,8 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test) ktime_get_ts64(&end); } - pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma); + pci_epf_test_print_rate("READ", reg->size, &start, &end, + reg->flags & FLAG_USE_DMA); crc32 = crc32_le(~0, buf, reg->size); if (crc32 != reg->checksum) @@ -519,7 +505,6 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test) int ret; void __iomem 
*dst_addr; void *buf; - bool use_dma; phys_addr_t phys_addr; phys_addr_t src_phys_addr; struct timespec64 start, end; @@ -555,14 +540,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test) get_random_bytes(buf, reg->size); reg->checksum = crc32_le(~0, buf, reg->size); - use_dma = !!(reg->flags & FLAG_USE_DMA); - if (use_dma) { - if (!epf_test->dma_supported) { - dev_err(dev, "Cannot transfer data using DMA\n"); - ret = -EINVAL; - goto err_dma_map; - } - + if (reg->flags & FLAG_USE_DMA) { src_phys_addr = dma_map_single(dma_dev, buf, reg->size, DMA_TO_DEVICE); if (dma_mapping_error(dma_dev, src_phys_addr)) { @@ -589,7 +567,8 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test) ktime_get_ts64(&end); } - pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma); + pci_epf_test_print_rate("WRITE", reg->size, &start, &end, + reg->flags & FLAG_USE_DMA); /* * wait 1ms inorder for the write to complete. Without this delay L3 @@ -660,6 +639,12 @@ static void pci_epf_test_cmd_handler(struct work_struct *work) reg->command = 0; reg->status = 0; + if ((READ_ONCE(reg->flags) & FLAG_USE_DMA) && + !epf_test->dma_supported) { + dev_err(dev, "Cannot transfer data using DMA\n"); + goto reset_handler; + } + if (reg->irq_type > IRQ_TYPE_MSIX) { dev_err(dev, "Failed to detect IRQ type\n"); goto reset_handler; diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 3bc9273d0a08..eb502426ea6c 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -613,7 +613,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf, if (type == SECONDARY_INTERFACE && epf->sec_epc) return -EBUSY; - mutex_lock(&epc->lock); + mutex_lock(&epc->list_lock); func_no = find_first_zero_bit(&epc->function_num_map, BITS_PER_LONG); if (func_no >= BITS_PER_LONG) { @@ -640,7 +640,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf, list_add_tail(list, &epc->pci_epf); ret: - mutex_unlock(&epc->lock); + mutex_unlock(&epc->list_lock); return ret; } @@ -664,19 +664,19 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, if (!epc || IS_ERR(epc) || !epf) return; + mutex_lock(&epc->list_lock); if (type == PRIMARY_INTERFACE) { func_no = epf->func_no; list = &epf->list; + epf->epc = NULL; } else { func_no = epf->sec_epc_func_no; list = &epf->sec_epc_list; + epf->sec_epc = NULL; } - - mutex_lock(&epc->lock); clear_bit(func_no, &epc->function_num_map); list_del(list); - epf->epc = NULL; - mutex_unlock(&epc->lock); + mutex_unlock(&epc->list_lock); } EXPORT_SYMBOL_GPL(pci_epc_remove_epf); @@ -740,7 +740,7 @@ void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc) { int r; - r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match, + r = devres_release(dev, devm_pci_epc_release, devm_pci_epc_match, epc); dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n"); } @@ -773,6 +773,7 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, } mutex_init(&epc->lock); + mutex_init(&epc->list_lock); INIT_LIST_HEAD(&epc->pci_epf); ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier); diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 9ed556936f48..7d21f134143c 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -234,6 +234,7 @@ void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf) mutex_lock(&epf_pf->lock); clear_bit(epf_vf->vfunc_no, &epf_pf->vfunction_num_map); + epf_vf->epf_pf = NULL; 
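Sketch of the locking split used in pci-epc-core above, reduced to its essentials: a dedicated list_lock protects the function list and function-number bitmap so add/remove never needs the controller's main ops lock. struct foo_epc and its helpers are simplified stand-ins, not the real API.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>

struct foo_epc {
	struct mutex		lock;		/* serializes controller ops */
	struct mutex		list_lock;	/* protects the two fields below */
	struct list_head	pci_epf;
	unsigned long		function_num_map;
};

static int foo_epc_add_epf(struct foo_epc *epc, struct list_head *entry)
{
	int func_no;

	mutex_lock(&epc->list_lock);
	func_no = find_first_zero_bit(&epc->function_num_map, BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		mutex_unlock(&epc->list_lock);
		return -EINVAL;
	}
	set_bit(func_no, &epc->function_num_map);
	list_add_tail(entry, &epc->pci_epf);
	mutex_unlock(&epc->list_lock);

	return func_no;
}

static void foo_epc_remove_epf(struct foo_epc *epc, struct list_head *entry,
			       int func_no)
{
	mutex_lock(&epc->list_lock);
	clear_bit(func_no, &epc->function_num_map);
	list_del(entry);
	mutex_unlock(&epc->list_lock);
}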
list_del(&epf_vf->list); mutex_unlock(&epf_pf->lock); } diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c index 3b248426a9f4..a35af42d6a3d 100644 --- a/drivers/pci/hotplug/cpqphp_pci.c +++ b/drivers/pci/hotplug/cpqphp_pci.c @@ -135,11 +135,13 @@ int cpqhp_unconfigure_device(struct pci_func *func) static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 offset, u32 *value) { u32 vendID = 0; + int ret; - if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1) - return -1; - if (vendID == 0xffffffff) - return -1; + ret = pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID); + if (ret != PCIBIOS_SUCCESSFUL) + return PCIBIOS_DEVICE_NOT_FOUND; + if (PCI_POSSIBLE_ERROR(vendID)) + return PCIBIOS_DEVICE_NOT_FOUND; return pci_bus_read_config_dword(bus, devfn, offset, value); } @@ -202,13 +204,15 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_ { u16 tdevice; u32 work; + int ret; u8 tbus; ctrl->pci_bus->number = bus_num; for (tdevice = 0; tdevice < 0xFF; tdevice++) { /* Scan for access first */ - if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) + ret = PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work); + if (ret) continue; dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice); /* Yep we got one. Not a bridge ? */ @@ -220,7 +224,8 @@ static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_ } for (tdevice = 0; tdevice < 0xFF; tdevice++) { /* Scan for access first */ - if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) + ret = PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work); + if (ret) continue; dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice); /* Yep we got one. bridge ? 
*/ @@ -253,7 +258,7 @@ static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num *dev_num = tdevice; ctrl->pci_bus->number = tbus; pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work); - if (!nobridge || (work == 0xffffffff)) + if (!nobridge || PCI_POSSIBLE_ERROR(work)) return 0; dbg("bus_num %d devfn %d\n", *bus_num, *dev_num); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 358f077284cb..1ee5f7a9c741 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -840,7 +840,9 @@ void pcie_enable_interrupt(struct controller *ctrl) { u16 mask; - mask = PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_DLLSCE; + mask = PCI_EXP_SLTCTL_DLLSCE; + if (!pciehp_poll_mode) + mask |= PCI_EXP_SLTCTL_HPIE; pcie_write_cmd(ctrl, mask, mask); } diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index a4967beb384d..8e1322c3898c 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -517,6 +517,31 @@ static ssize_t bus_rescan_store(struct device *dev, static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL, bus_rescan_store); +static ssize_t reset_subordinate_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_bus *bus = pdev->subordinate; + unsigned long val; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (kstrtoul(buf, 0, &val) < 0) + return -EINVAL; + + if (val) { + int ret = __pci_reset_bus(bus); + + if (ret) + return ret; + } + + return count; +} +static DEVICE_ATTR_WO(reset_subordinate); + #if defined(CONFIG_PM) && defined(CONFIG_ACPI) static ssize_t d3cold_allowed_store(struct device *dev, struct device_attribute *attr, @@ -621,6 +646,7 @@ static struct attribute *pci_dev_attrs[] = { static struct attribute *pci_bridge_attrs[] = { &dev_attr_subordinate_bus_number.attr, &dev_attr_secondary_bus_number.attr, + &dev_attr_reset_subordinate.attr, NULL, }; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 0baf5c03ef4c..10436a193b3b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -5308,7 +5308,7 @@ static ssize_t reset_method_store(struct device *dev, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); - char *options, *name; + char *options, *tmp_options, *name; int m, n; u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 }; @@ -5328,7 +5328,8 @@ static ssize_t reset_method_store(struct device *dev, return -ENOMEM; n = 0; - while ((name = strsep(&options, " ")) != NULL) { + tmp_options = options; + while ((name = strsep(&tmp_options, " ")) != NULL) { if (sysfs_streq(name, "")) continue; @@ -5932,7 +5933,7 @@ EXPORT_SYMBOL_GPL(pci_probe_reset_bus); * * Same as above except return -EAGAIN if the bus cannot be locked */ -static int __pci_reset_bus(struct pci_bus *bus) +int __pci_reset_bus(struct pci_bus *bus) { int rc; @@ -6807,60 +6808,70 @@ static void pci_no_domains(void) } #ifdef CONFIG_PCI_DOMAINS_GENERIC -static atomic_t __domain_nr = ATOMIC_INIT(-1); +static DEFINE_IDA(pci_domain_nr_static_ida); +static DEFINE_IDA(pci_domain_nr_dynamic_ida); -static int pci_get_new_domain_nr(void) +static void of_pci_reserve_static_domain_nr(void) { - return atomic_inc_return(&__domain_nr); + struct device_node *np; + int domain_nr; + + for_each_node_by_type(np, "pci") { + domain_nr = of_get_pci_domain_nr(np); + if (domain_nr < 0) + continue; + /* + * Permanently allocate domain_nr in dynamic_ida + * to prevent it from dynamic allocation. 
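A self-contained sketch of the two-IDA scheme described in the comment above (example_* names are made up): IDs named in DT are reserved in the dynamic IDA up front so ida_alloc() can never hand them out, while the static IDA catches duplicate DT assignments.

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_static_ida);	/* one entry per DT-named id  */
static DEFINE_IDA(example_dynamic_ida);	/* everything else lives here */

static void example_reserve_dt_ids(const int *dt_ids, int n)
{
	int i;

	/* Keep ida_alloc() below from ever handing out a DT-named id */
	for (i = 0; i < n; i++)
		ida_alloc_range(&example_dynamic_ida, dt_ids[i], dt_ids[i],
				GFP_KERNEL);
}

static int example_claim_id(int dt_id)
{
	if (dt_id >= 0)
		/* -ENOSPC here means the same DT id was claimed twice */
		return ida_alloc_range(&example_static_ida, dt_id, dt_id,
				       GFP_KERNEL);

	/* No DT id: pick any number that is not named in DT */
	return ida_alloc(&example_dynamic_ida, GFP_KERNEL);
}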
+ */ + ida_alloc_range(&pci_domain_nr_dynamic_ida, + domain_nr, domain_nr, GFP_KERNEL); + } } static int of_pci_bus_find_domain_nr(struct device *parent) { - static int use_dt_domains = -1; - int domain = -1; + static bool static_domains_reserved = false; + int domain_nr; - if (parent) - domain = of_get_pci_domain_nr(parent->of_node); - - /* - * Check DT domain and use_dt_domains values. - * - * If DT domain property is valid (domain >= 0) and - * use_dt_domains != 0, the DT assignment is valid since this means - * we have not previously allocated a domain number by using - * pci_get_new_domain_nr(); we should also update use_dt_domains to - * 1, to indicate that we have just assigned a domain number from - * DT. - * - * If DT domain property value is not valid (ie domain < 0), and we - * have not previously assigned a domain number from DT - * (use_dt_domains != 1) we should assign a domain number by - * using the: - * - * pci_get_new_domain_nr() - * - * API and update the use_dt_domains value to keep track of method we - * are using to assign domain numbers (use_dt_domains = 0). - * - * All other combinations imply we have a platform that is trying - * to mix domain numbers obtained from DT and pci_get_new_domain_nr(), - * which is a recipe for domain mishandling and it is prevented by - * invalidating the domain value (domain = -1) and printing a - * corresponding error. - */ - if (domain >= 0 && use_dt_domains) { - use_dt_domains = 1; - } else if (domain < 0 && use_dt_domains != 1) { - use_dt_domains = 0; - domain = pci_get_new_domain_nr(); - } else { - if (parent) - pr_err("Node %pOF has ", parent->of_node); - pr_err("Inconsistent \"linux,pci-domain\" property in DT\n"); - domain = -1; + /* On the first call scan device tree for static allocations. */ + if (!static_domains_reserved) { + of_pci_reserve_static_domain_nr(); + static_domains_reserved = true; } - return domain; + if (parent) { + /* + * If domain is in DT, allocate it in static IDA. This + * prevents duplicate static allocations in case of errors + * in DT. + */ + domain_nr = of_get_pci_domain_nr(parent->of_node); + if (domain_nr >= 0) + return ida_alloc_range(&pci_domain_nr_static_ida, + domain_nr, domain_nr, + GFP_KERNEL); + } + + /* + * If domain was not specified in DT, choose a free ID from dynamic + * allocations. All domain numbers from DT are permanently in + * dynamic allocations to prevent assigning them to other DT nodes + * without static domain. + */ + return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL); +} + +static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent) +{ + if (bus->domain_nr < 0) + return; + + /* Release domain from IDA where it was allocated. */ + if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr) + ida_free(&pci_domain_nr_static_ida, bus->domain_nr); + else + ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr); } int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent) @@ -6868,6 +6879,13 @@ int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent) return acpi_disabled ? 
of_pci_bus_find_domain_nr(parent) : acpi_pci_bus_find_domain_nr(bus); } + +void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent) +{ + if (!acpi_disabled) + return; + of_pci_bus_release_domain_nr(bus, parent); +} #endif /** diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 5d5813aa5b45..38ad75ce52c3 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -37,6 +37,7 @@ bool pci_reset_supported(struct pci_dev *dev); void pci_init_reset_methods(struct pci_dev *dev); int pci_bridge_secondary_bus_reset(struct pci_dev *dev); int pci_bus_error_reset(struct pci_dev *dev); +int __pci_reset_bus(struct pci_bus *bus); struct pci_cap_saved_data { u16 cap_nr; diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 5426f450ce91..1b59e6fc7439 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -1384,6 +1384,22 @@ static int aer_probe(struct pcie_device *dev) return 0; } +static int aer_suspend(struct pcie_device *dev) +{ + struct aer_rpc *rpc = get_service_data(dev); + + aer_disable_rootport(rpc); + return 0; +} + +static int aer_resume(struct pcie_device *dev) +{ + struct aer_rpc *rpc = get_service_data(dev); + + aer_enable_rootport(rpc); + return 0; +} + /** * aer_root_reset - reset Root Port hierarchy, RCEC, or RCiEP * @dev: pointer to Root Port, RCEC, or RCiEP @@ -1455,6 +1471,8 @@ static struct pcie_port_service_driver aerdriver = { .service = PCIE_PORT_SERVICE_AER, .probe = aer_probe, + .suspend = aer_suspend, + .resume = aer_resume, .remove = aer_remove, }; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 133ebb7841c5..9815cf96c6ea 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1026,16 +1026,16 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) parent_link = link->parent; /* - * link->downstream is a pointer to the pci_dev of function 0. If - * we remove that function, the pci_dev is about to be deallocated, - * so we can't use link->downstream again. Free the link state to - * avoid this. + * Free the parent link state, no later than function 0 (i.e. + * link->downstream) being removed. * - * If we're removing a non-0 function, it's possible we could - * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends - * programming the same ASPM Control value for all functions of - * multi-function devices, so disable ASPM for all of them. + * Do not free the link state any earlier. If function 0 is a + * switch upstream port, this link state is parent_link to all + * subordinate ones. */ + if (pdev != link->downstream) + goto out; + pcie_config_aspm_link(link, 0); list_del(&link->sibling); free_link_state(link); @@ -1046,6 +1046,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) pcie_config_aspm_path(parent_link); } + out: mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); } diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 1ac7fec47d6f..332315183cd4 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -214,10 +214,12 @@ static int get_port_device_capability(struct pci_dev *dev) /* * Disable hot-plug interrupts in case they have been enabled - * by the BIOS and the hot-plug service driver is not loaded. + * by the BIOS and the hot-plug service driver won't be loaded + * to handle them. 
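Illustration of the guard added in the portdrv hunk below: IS_ENABLED(CONFIG_FOO) expands to 1 when the option is =y or =m and to 0 otherwise, so the register clear is compiled out when a hotplug driver will own the Slot Control bits. CONFIG_EXAMPLE_HANDLER and example_quiesce_slot_irqs() are made-up names.

#include <linux/kconfig.h>
#include <linux/pci.h>

static void example_quiesce_slot_irqs(struct pci_dev *port, u16 bits)
{
	/* 1 if CONFIG_EXAMPLE_HANDLER is =y or =m, 0 (dead code) otherwise */
	if (IS_ENABLED(CONFIG_EXAMPLE_HANDLER))
		return;		/* the handler will manage these bits itself */

	pcie_capability_clear_word(port, PCI_EXP_SLTCTL, bits);
}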
*/ - pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, - PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); + if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) + pcie_capability_clear_word(dev, PCI_EXP_SLTCTL, + PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); } #ifdef CONFIG_PCIEAER diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index e19b79821dd6..03f7550d8982 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -888,6 +888,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) resource_size_t offset, next_offset; LIST_HEAD(resources); struct resource *res, *next_res; + bool bus_registered = false; char addr[64], *fmt; const char *name; int err; @@ -906,6 +907,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) bus->domain_nr = pci_bus_find_domain_nr(bus, parent); else bus->domain_nr = bridge->domain_nr; + if (bus->domain_nr < 0) { + err = bus->domain_nr; + goto free; + } #endif b = pci_find_bus(pci_domain_nr(bus), bridge->busnr); @@ -926,10 +931,9 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) /* Temporarily move resources off the list */ list_splice_init(&bridge->windows, &resources); err = device_add(&bridge->dev); - if (err) { - put_device(&bridge->dev); + if (err) goto free; - } + bus->bridge = get_device(&bridge->dev); device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); @@ -948,6 +952,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) name = dev_name(&bus->dev); err = device_register(&bus->dev); + bus_registered = true; if (err) goto unregister; @@ -1031,9 +1036,15 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) unregister: put_device(&bridge->dev); device_del(&bridge->dev); - free: - kfree(bus); +#ifdef CONFIG_PCI_DOMAINS_GENERIC + pci_bus_release_domain_nr(bus, parent); +#endif + if (bus_registered) + put_device(&bus->dev); + else + kfree(bus); + return err; } @@ -1142,7 +1153,10 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, add_dev: pci_set_bus_msi_domain(child); ret = device_register(&child->dev); - WARN_ON(ret < 0); + if (WARN_ON(ret < 0)) { + put_device(&child->dev); + return NULL; + } pcibios_add_bus(child); @@ -1593,23 +1607,33 @@ static void set_pcie_thunderbolt(struct pci_dev *dev) static void set_pcie_untrusted(struct pci_dev *dev) { - struct pci_dev *parent; + struct pci_dev *parent = pci_upstream_bridge(dev); + if (!parent) + return; /* - * If the upstream bridge is untrusted we treat this device + * If the upstream bridge is untrusted we treat this device as * untrusted as well. */ - parent = pci_upstream_bridge(dev); - if (parent && (parent->untrusted || parent->external_facing)) + if (parent->untrusted) { dev->untrusted = true; + return; + } + + if (arch_pci_dev_is_removable(dev)) { + pci_dbg(dev, "marking as untrusted\n"); + dev->untrusted = true; + } } static void pci_set_removable(struct pci_dev *dev) { struct pci_dev *parent = pci_upstream_bridge(dev); + if (!parent) + return; /* - * We (only) consider everything downstream from an external_facing + * We (only) consider everything tunneled below an external_facing * device to be removable by the user. We're mainly concerned with * consumer platforms with user accessible thunderbolt ports that are * vulnerable to DMA attacks, and we expect those ports to be marked by @@ -1619,9 +1643,15 @@ static void pci_set_removable(struct pci_dev *dev) * accessible to user / may not be removed by end user, and thus not * exposed as "removable" to userspace. 
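For reference, a small sketch of the device-core removable attribute that pci_set_removable() relies on; example_mark_removable() is hypothetical, while dev_set_removable()/dev_is_removable() are the real helpers from <linux/device.h>.

#include <linux/device.h>

static void example_mark_removable(struct device *dev, struct device *parent)
{
	/* Propagate "removable" down from an already-marked parent */
	if (parent && dev_is_removable(parent))
		dev_set_removable(dev, DEVICE_REMOVABLE);
	else
		dev_set_removable(dev, DEVICE_FIXED);
}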
*/ - if (parent && - (parent->external_facing || dev_is_removable(&parent->dev))) + if (dev_is_removable(&parent->dev)) { dev_set_removable(&dev->dev, DEVICE_REMOVABLE); + return; + } + + if (arch_pci_dev_is_removable(dev)) { + pci_dbg(dev, "marking as removable\n"); + dev_set_removable(&dev->dev, DEVICE_REMOVABLE); + } } /** diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e6983e1f3eee..3cb8a6856c7a 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4889,18 +4889,21 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) } /* - * Wangxun 10G/1G NICs have no ACS capability, and on multi-function - * devices, peer-to-peer transactions are not be used between the functions. - * So add an ACS quirk for below devices to isolate functions. + * Wangxun 40G/25G/10G/1G NICs have no ACS capability, but on + * multi-function devices, the hardware isolates the functions by + * directing all peer-to-peer traffic upstream as though PCI_ACS_RR and + * PCI_ACS_CR were set. * SFxxx 1G NICs(em). * RP1000/RP2000 10G NICs(sp). + * FF5xxx 40G/25G/10G NICs(aml). */ static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags) { switch (dev->device) { - case 0x0100 ... 0x010F: - case 0x1001: - case 0x2001: + case 0x0100 ... 0x010F: /* EM */ + case 0x1001: case 0x2001: /* SP */ + case 0x5010: case 0x5025: case 0x5040: /* AML */ + case 0x5110: case 0x5125: case 0x5140: /* AML */ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } @@ -5013,6 +5016,10 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_BROADCOM, 0x1760, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_BROADCOM, 0x1761, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_BROADCOM, 0x1762, pci_quirk_mf_endpoint_acs }, + { PCI_VENDOR_ID_BROADCOM, 0x1763, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs }, /* Amazon Annapurna Labs */ { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs }, @@ -5871,6 +5878,17 @@ SWITCHTEC_QUIRK(0x5552); /* PAXA 52XG5 */ SWITCHTEC_QUIRK(0x5536); /* PAXA 36XG5 */ SWITCHTEC_QUIRK(0x5528); /* PAXA 28XG5 */ +#define SWITCHTEC_PCI100X_QUIRK(vid) \ + DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_EFAR, vid, \ + PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias) +SWITCHTEC_PCI100X_QUIRK(0x1001); /* PCI1001XG4 */ +SWITCHTEC_PCI100X_QUIRK(0x1002); /* PCI1002XG4 */ +SWITCHTEC_PCI100X_QUIRK(0x1003); /* PCI1003XG4 */ +SWITCHTEC_PCI100X_QUIRK(0x1004); /* PCI1004XG4 */ +SWITCHTEC_PCI100X_QUIRK(0x1005); /* PCI1005XG4 */ +SWITCHTEC_PCI100X_QUIRK(0x1006); /* PCI1006XG4 */ + + /* * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints. 
* These IDs are used to forward responses to the originator on the other @@ -6140,6 +6158,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa72f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size); #endif diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index 4c54c75050dc..22d39e12b236 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -157,6 +157,13 @@ void pci_remove_root_bus(struct pci_bus *bus) list_for_each_entry_safe(child, tmp, &bus->devices, bus_list) pci_remove_bus_device(child); + +#ifdef CONFIG_PCI_DOMAINS_GENERIC + /* Release domain_nr if it was dynamically allocated */ + if (host_bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET) + pci_bus_release_domain_nr(bus, host_bridge->dev.parent); +#endif + pci_remove_bus(bus); host_bridge->bus = NULL; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index b8cb990044fb..3ce68adda9b7 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -806,11 +806,9 @@ static resource_size_t calculate_iosize(resource_size_t size, size = (size & 0xff) + ((size & ~0xffUL) << 2); #endif size = size + size1; - if (size < old_size) - size = old_size; - size = ALIGN(max(size, add_size) + children_add_size, align); - return size; + size = max(size, add_size) + children_add_size; + return ALIGN(max(size, old_size), align); } static resource_size_t calculate_memsize(resource_size_t size, @@ -2027,8 +2025,7 @@ pci_root_bus_distribute_available_resources(struct pci_bus *bus, * in case of root bus. 
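A worked example of the calculate_iosize() change earlier in this file, with made-up numbers: size (already including size1) = 0x300, add_size = 0x100, children_add_size = 0x80, old_size = 0x1000, align = 0x400.

/*
 * Old order:  size = max(old_size, size) = 0x1000
 *             return ALIGN(max(size, add_size) + children_add_size, align)
 *                  = ALIGN(0x1080, 0x400) = 0x1400
 *
 * New order:  size = max(size, add_size) + children_add_size = 0x380
 *             return ALIGN(max(size, old_size), align)
 *                  = ALIGN(0x1000, 0x400) = 0x1000
 *
 * The already-assigned window (old_size) is no longer padded again by
 * add_size/children_add_size, so a window that is already large enough
 * keeps its current size.
 */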
*/ if (bridge && pci_bridge_resources_not_assigned(dev)) - pci_bridge_distribute_available_resources(bridge, - add_list); + pci_bridge_distribute_available_resources(dev, add_list); else pci_root_bus_distribute_available_resources(b, add_list); } diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index a0c67191a8b9..7f4bcf359b91 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -79,6 +79,7 @@ static void pci_slot_release(struct kobject *kobj) up_read(&pci_bus_sem); list_del(&slot->list); + pci_bus_put(slot->bus); kfree(slot); } @@ -261,7 +262,7 @@ placeholder: goto err; } - slot->bus = parent; + slot->bus = pci_bus_get(parent); slot->number = slot_nr; slot->kobj.kset = pci_slots_kset; @@ -269,6 +270,7 @@ placeholder: slot_name = make_slot_name(name); if (!slot_name) { err = -ENOMEM; + pci_bus_put(slot->bus); kfree(slot); goto err; } diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 332af6938d7f..9011518b1d13 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -1739,6 +1739,26 @@ static void switchtec_pci_remove(struct pci_dev *pdev) .driver_data = gen, \ } +#define SWITCHTEC_PCI100X_DEVICE(device_id, gen) \ + { \ + .vendor = PCI_VENDOR_ID_EFAR, \ + .device = device_id, \ + .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID, \ + .class = (PCI_CLASS_MEMORY_OTHER << 8), \ + .class_mask = 0xFFFFFFFF, \ + .driver_data = gen, \ + }, \ + { \ + .vendor = PCI_VENDOR_ID_EFAR, \ + .device = device_id, \ + .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID, \ + .class = (PCI_CLASS_BRIDGE_OTHER << 8), \ + .class_mask = 0xFFFFFFFF, \ + .driver_data = gen, \ + } + static const struct pci_device_id switchtec_pci_tbl[] = { SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3), /* PFX 24xG3 */ SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3), /* PFX 32xG3 */ @@ -1833,6 +1853,12 @@ static const struct pci_device_id switchtec_pci_tbl[] = { SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5), /* PAXA 52XG5 */ SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5), /* PAXA 36XG5 */ SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5), /* PAXA 28XG5 */ + SWITCHTEC_PCI100X_DEVICE(0x1001, SWITCHTEC_GEN4), /* PCI1001 16XG4 */ + SWITCHTEC_PCI100X_DEVICE(0x1002, SWITCHTEC_GEN4), /* PCI1002 12XG4 */ + SWITCHTEC_PCI100X_DEVICE(0x1003, SWITCHTEC_GEN4), /* PCI1003 16XG4 */ + SWITCHTEC_PCI100X_DEVICE(0x1004, SWITCHTEC_GEN4), /* PCI1004 16XG4 */ + SWITCHTEC_PCI100X_DEVICE(0x1005, SWITCHTEC_GEN4), /* PCI1005 16XG4 */ + SWITCHTEC_PCI100X_DEVICE(0x1006, SWITCHTEC_GEN4), /* PCI1006 16XG4 */ {0} }; MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl); diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index b8983f42e749..b2e0a79254f1 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -675,8 +675,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, if ((chan == 5 && cmn->rsp_vc_num < 2) || (chan == 6 && cmn->dat_vc_num < 2) || - (chan == 7 && cmn->snp_vc_num < 2) || - (chan == 8 && cmn->req_vc_num < 2)) + (chan == 7 && cmn->req_vc_num < 2) || + (chan == 8 && cmn->snp_vc_num < 2)) return 0; } @@ -794,8 +794,8 @@ static umode_t arm_cmn_event_attr_is_visible(struct kobject *kobj, _CMN_EVENT_XP(pub_##_name, (_event) | (4 << 5)), \ _CMN_EVENT_XP(rsp2_##_name, (_event) | (5 << 5)), \ _CMN_EVENT_XP(dat2_##_name, (_event) | (6 << 5)), \ - _CMN_EVENT_XP(snp2_##_name, (_event) | (7 << 5)), \ - _CMN_EVENT_XP(req2_##_name, (_event) | (8 << 5)) + _CMN_EVENT_XP(req2_##_name, (_event) | (7 << 5)), \ + _CMN_EVENT_XP(snp2_##_name, (_event) | (8 << 5)) static struct 
attribute *arm_cmn_event_attrs[] = { @@ -1951,8 +1951,6 @@ static int arm_cmn_init_dtcs(struct arm_cmn *cmn) continue; xp = arm_cmn_node_to_xp(cmn, dn); - dn->portid_bits = xp->portid_bits; - dn->deviceid_bits = xp->deviceid_bits; dn->dtc = xp->dtc; dn->dtm = xp->dtm; if (cmn->multi_dtm) @@ -2183,6 +2181,8 @@ static int arm_cmn_discover(struct arm_cmn *cmn, unsigned int rgn_offset) } arm_cmn_init_node_info(cmn, reg & CMN_CHILD_NODE_ADDR, dn); + dn->portid_bits = xp->portid_bits; + dn->deviceid_bits = xp->deviceid_bits; switch (dn->type) { case CMN_TYPE_DTC: @@ -2313,6 +2313,7 @@ static int arm_cmn_probe(struct platform_device *pdev) cmn->dev = &pdev->dev; cmn->part = (unsigned long)device_get_match_data(cmn->dev); + cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev)); platform_set_drvdata(pdev, cmn); if (cmn->part == PART_CMN600 && has_acpi_companion(cmn->dev)) { @@ -2340,7 +2341,6 @@ static int arm_cmn_probe(struct platform_device *pdev) if (err) return err; - cmn->cpu = cpumask_local_spread(0, dev_to_node(cmn->dev)); cmn->pmu = (struct pmu) { .module = THIS_MODULE, .attr_groups = arm_cmn_attr_groups, diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 3f07df5a7e95..d351d6ce750b 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -340,12 +340,10 @@ armpmu_add(struct perf_event *event, int flags) if (idx < 0) return idx; - /* - * If there is an event in the counter we are going to use then make - * sure it is disabled. - */ + /* The newly-allocated counter should be empty */ + WARN_ON_ONCE(hw_events->events[idx]); + event->hw.idx = idx; - armpmu->disable(event); hw_events->events[idx] = event; hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index 0e17c57ddb87..feaf79b98001 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -431,6 +431,17 @@ static int smmu_pmu_event_init(struct perf_event *event) return -EINVAL; } + /* + * Ensure all events are on the same cpu so all events are in the + * same cpu context, to avoid races on pmu_enable etc. + */ + event->cpu = smmu_pmu->on_cpu; + + hwc->idx = -1; + + if (event->group_leader == event) + return 0; + for_each_sibling_event(sibling, event->group_leader) { if (is_software_event(sibling)) continue; @@ -442,14 +453,6 @@ static int smmu_pmu_event_init(struct perf_event *event) return -EINVAL; } - hwc->idx = -1; - - /* - * Ensure all events are on the same cpu so all events are in the - * same cpu context, to avoid races on pmu_enable etc. - */ - event->cpu = smmu_pmu->on_cpu; - return 0; } diff --git a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c index 3b374b37b965..1bc9557c5806 100644 --- a/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c +++ b/drivers/phy/broadcom/phy-brcm-usb-init-synopsys.c @@ -309,6 +309,12 @@ static void usb_init_common_7216(struct brcm_usb_init_params *params) void __iomem *ctrl = params->regs[BRCM_REGS_CTRL]; USB_CTRL_UNSET(ctrl, USB_PM, XHC_S2_CLK_SWITCH_EN); + + /* + * The PHY might be in a bad state if it is already powered + * up. Toggle the power just in case. 
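A minimal sketch of the power-toggle idea in the comment above, using a made-up register helper; EXAMPLE_USB_PWRDN is a placeholder bit, not the real USB_CTRL layout.

#include <linux/bits.h>
#include <linux/io.h>

#define EXAMPLE_USB_PWRDN	BIT(0)	/* placeholder bit */

static void example_phy_power_cycle(void __iomem *ctrl)
{
	u32 val = readl(ctrl);

	writel(val | EXAMPLE_USB_PWRDN, ctrl);	/* force a power-down first */
	writel(val & ~EXAMPLE_USB_PWRDN, ctrl);	/* then power the PHY back up */
}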
+ */ + USB_CTRL_SET(ctrl, USB_PM, USB_PWRDN); USB_CTRL_UNSET(ctrl, USB_PM, USB_PWRDN); /* 1 millisecond - for USB clocks to settle down */ diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c index 211ce84d980f..4f334ac5a7a9 100644 --- a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c +++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c @@ -50,6 +50,7 @@ enum imx8_pcie_phy_type { IMX8MM, + IMX8MP, }; struct imx8_pcie_phy_drvdata { @@ -62,6 +63,7 @@ struct imx8_pcie_phy { struct clk *clk; struct phy *phy; struct regmap *iomuxc_gpr; + struct reset_control *perst; struct reset_control *reset; u32 refclk_pad_mode; u32 tx_deemph_gen1; @@ -76,11 +78,11 @@ static int imx8_pcie_phy_power_on(struct phy *phy) u32 val, pad_mode; struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy); - reset_control_assert(imx8_phy->reset); - pad_mode = imx8_phy->refclk_pad_mode; switch (imx8_phy->drvdata->variant) { case IMX8MM: + reset_control_assert(imx8_phy->reset); + /* Tune PHY de-emphasis setting to pass PCIe compliance. */ if (imx8_phy->tx_deemph_gen1) writel(imx8_phy->tx_deemph_gen1, @@ -89,6 +91,8 @@ static int imx8_pcie_phy_power_on(struct phy *phy) writel(imx8_phy->tx_deemph_gen2, imx8_phy->base + PCIE_PHY_TRSV_REG6); break; + case IMX8MP: /* Do nothing. */ + break; } if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT || @@ -139,18 +143,21 @@ static int imx8_pcie_phy_power_on(struct phy *phy) IMX8MM_GPR_PCIE_REF_CLK_PLL); usleep_range(100, 200); - /* Do the PHY common block reset */ - regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, - IMX8MM_GPR_PCIE_CMN_RST, - IMX8MM_GPR_PCIE_CMN_RST); - switch (imx8_phy->drvdata->variant) { + case IMX8MP: + reset_control_deassert(imx8_phy->perst); + fallthrough; case IMX8MM: reset_control_deassert(imx8_phy->reset); usleep_range(200, 500); break; } + /* Do the PHY common block reset */ + regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14, + IMX8MM_GPR_PCIE_CMN_RST, + IMX8MM_GPR_PCIE_CMN_RST); + /* Polling to check the phy is ready or not. 
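For illustration, the readl_poll_timeout() idiom used right after this comment, with placeholder register names.

#include <linux/bits.h>
#include <linux/iopoll.h>

#define EXAMPLE_PLL_STAT	0x0	/* placeholder register offset */
#define EXAMPLE_PLL_LOCKED	BIT(0)	/* placeholder status bit      */

static int example_wait_pll_locked(void __iomem *base)
{
	u32 val;

	/* Re-read every ~10us, give up with -ETIMEDOUT after 20ms */
	return readl_poll_timeout(base + EXAMPLE_PLL_STAT, val,
				  val & EXAMPLE_PLL_LOCKED, 10, 20000);
}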
*/ ret = readl_poll_timeout(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG75, val, val == PCIE_PHY_CMN_REG75_PLL_DONE, @@ -158,6 +165,16 @@ static int imx8_pcie_phy_power_on(struct phy *phy) return ret; } +static int imx8_pcie_phy_power_off(struct phy *phy) +{ + struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy); + + reset_control_assert(imx8_phy->reset); + reset_control_assert(imx8_phy->perst); + + return 0; +} + static int imx8_pcie_phy_init(struct phy *phy) { struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy); @@ -178,6 +195,7 @@ static const struct phy_ops imx8_pcie_phy_ops = { .init = imx8_pcie_phy_init, .exit = imx8_pcie_phy_exit, .power_on = imx8_pcie_phy_power_on, + .power_off = imx8_pcie_phy_power_off, .owner = THIS_MODULE, }; @@ -186,8 +204,14 @@ static const struct imx8_pcie_phy_drvdata imx8mm_drvdata = { .variant = IMX8MM, }; +static const struct imx8_pcie_phy_drvdata imx8mp_drvdata = { + .gpr = "fsl,imx8mp-iomuxc-gpr", + .variant = IMX8MP, +}; + static const struct of_device_id imx8_pcie_phy_of_match[] = { {.compatible = "fsl,imx8mm-pcie-phy", .data = &imx8mm_drvdata, }, + {.compatible = "fsl,imx8mp-pcie-phy", .data = &imx8mp_drvdata, }, { }, }; MODULE_DEVICE_TABLE(of, imx8_pcie_phy_of_match); @@ -243,6 +267,14 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev) return PTR_ERR(imx8_phy->reset); } + if (imx8_phy->drvdata->variant == IMX8MP) { + imx8_phy->perst = + devm_reset_control_get_exclusive(dev, "perst"); + if (IS_ERR(imx8_phy->perst)) + return dev_err_probe(dev, PTR_ERR(imx8_phy->perst), + "Failed to get PCIE PHY PERST control\n"); + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); imx8_phy->base = devm_ioremap_resource(dev, res); if (IS_ERR(imx8_phy->base)) diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c index e56bc020f695..c1441c69d714 100644 --- a/drivers/phy/phy-core.c +++ b/drivers/phy/phy-core.c @@ -138,8 +138,10 @@ static struct phy_provider *of_phy_provider_lookup(struct device_node *node) return phy_provider; for_each_child_of_node(phy_provider->children, child) - if (child == node) + if (child == node) { + of_node_put(child); return phy_provider; + } } return ERR_PTR(-EPROBE_DEFER); @@ -396,13 +398,14 @@ EXPORT_SYMBOL_GPL(phy_power_off); int phy_set_mode_ext(struct phy *phy, enum phy_mode mode, int submode) { - int ret; + int ret = 0; - if (!phy || !phy->ops->set_mode) + if (!phy) return 0; mutex_lock(&phy->mutex); - ret = phy->ops->set_mode(phy, mode, submode); + if (phy->ops->set_mode) + ret = phy->ops->set_mode(phy, mode, submode); if (!ret) phy->attrs.mode = mode; mutex_unlock(&phy->mutex); @@ -575,8 +578,10 @@ static struct phy *_of_phy_get(struct device_node *np, int index) return ERR_PTR(-ENODEV); /* This phy type handled by the usb-phy subsystem for now */ - if (of_device_is_compatible(args.np, "usb-nop-xceiv")) - return ERR_PTR(-ENODEV); + if (of_device_is_compatible(args.np, "usb-nop-xceiv")) { + phy = ERR_PTR(-ENODEV); + goto out_put_node; + } mutex_lock(&phy_provider_mutex); phy_provider = of_phy_provider_lookup(args.np); @@ -598,6 +603,7 @@ out_put_module: out_unlock: mutex_unlock(&phy_provider_mutex); +out_put_node: of_node_put(args.np); return phy; @@ -683,7 +689,7 @@ void devm_phy_put(struct device *dev, struct phy *phy) if (!phy) return; - r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy); + r = devres_release(dev, devm_phy_release, devm_phy_match, phy); dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); } EXPORT_SYMBOL_GPL(devm_phy_put); @@ -1056,7 +1062,7 @@ void devm_phy_destroy(struct 
device *dev, struct phy *phy) { int r; - r = devres_destroy(dev, devm_phy_consume, devm_phy_match, phy); + r = devres_release(dev, devm_phy_consume, devm_phy_match, phy); dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n"); } EXPORT_SYMBOL_GPL(devm_phy_destroy); @@ -1194,12 +1200,12 @@ EXPORT_SYMBOL_GPL(of_phy_provider_unregister); * of_phy_provider_unregister to unregister the phy provider. */ void devm_of_phy_provider_unregister(struct device *dev, - struct phy_provider *phy_provider) + struct phy_provider *phy_provider) { int r; - r = devres_destroy(dev, devm_phy_provider_release, devm_phy_match, - phy_provider); + r = devres_release(dev, devm_phy_provider_release, devm_phy_match, + phy_provider); dev_WARN_ONCE(dev, r, "couldn't find PHY provider device resource\n"); } EXPORT_SYMBOL_GPL(devm_of_phy_provider_unregister); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c index f0ba35bb73c1..605591314f25 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c @@ -1393,7 +1393,7 @@ static const struct qmp_phy_init_tbl sc8280xp_usb3_uniphy_rx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f), QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff), QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f), - QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a), QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54), QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f), QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c index 7e61c6b278a7..024cc5ce68a3 100644 --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c @@ -9,6 +9,7 @@ * Copyright (C) 2014 Cogent Embedded, Inc. */ +#include #include #include #include @@ -21,12 +22,14 @@ #include #include #include +#include #include #include #include /******* USB2.0 Host registers (original offset is +0x200) *******/ #define USB2_INT_ENABLE 0x000 +#define USB2_AHB_BUS_CTR 0x008 #define USB2_USBCTR 0x00c #define USB2_SPD_RSM_TIMSET 0x10c #define USB2_OC_TIMSET 0x110 @@ -42,6 +45,10 @@ #define USB2_INT_ENABLE_USBH_INTB_EN BIT(2) /* For EHCI */ #define USB2_INT_ENABLE_USBH_INTA_EN BIT(1) /* For OHCI */ +/* AHB_BUS_CTR */ +#define USB2_AHB_BUS_CTR_MBL_MASK GENMASK(1, 0) +#define USB2_AHB_BUS_CTR_MBL_INCR4 2 + /* USBCTR */ #define USB2_USBCTR_DIRPD BIT(2) #define USB2_USBCTR_PLL_RST BIT(1) @@ -103,7 +110,6 @@ struct rcar_gen3_phy { struct rcar_gen3_chan *ch; u32 int_enable_bits; bool initialized; - bool otg_initialized; bool powered; }; @@ -113,10 +119,10 @@ struct rcar_gen3_chan { struct extcon_dev *extcon; struct rcar_gen3_phy rphys[NUM_OF_PHYS]; struct regulator *vbus; + struct reset_control *rstc; struct work_struct work; - struct mutex lock; /* protects rphys[...].powered */ + spinlock_t lock; /* protects access to hardware and driver data structure. 
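A short, self-contained sketch of the guard(spinlock_irqsave) helper this driver now uses (see the hunks below); struct example_dev is made up.

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_dev {
	spinlock_t	lock;
	bool		initialized;
	u32		pending;
};

static int example_update(struct example_dev *d, u32 bits)
{
	/* Lock (and irq state) released automatically on every return path */
	guard(spinlock_irqsave)(&d->lock);

	if (!d->initialized)
		return -EIO;

	d->pending |= bits;
	return 0;
}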
*/ enum usb_dr_mode dr_mode; - int irq; u32 obint_enable_bits; bool extcon_host; bool is_otg_channel; @@ -127,6 +133,7 @@ struct rcar_gen3_chan { struct rcar_gen3_phy_drv_data { const struct phy_ops *phy_usb2_ops; bool no_adp_ctrl; + bool init_bus; }; /* @@ -311,16 +318,15 @@ static bool rcar_gen3_is_any_rphy_initialized(struct rcar_gen3_chan *ch) return false; } -static bool rcar_gen3_needs_init_otg(struct rcar_gen3_chan *ch) +static bool rcar_gen3_is_any_otg_rphy_initialized(struct rcar_gen3_chan *ch) { - int i; - - for (i = 0; i < NUM_OF_PHYS; i++) { - if (ch->rphys[i].otg_initialized) - return false; + for (enum rcar_gen3_phy_index i = PHY_INDEX_BOTH_HC; i <= PHY_INDEX_EHCI; + i++) { + if (ch->rphys[i].initialized) + return true; } - return true; + return false; } static bool rcar_gen3_are_all_rphys_power_off(struct rcar_gen3_chan *ch) @@ -342,7 +348,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr, bool is_b_device; enum phy_mode cur_mode, new_mode; - if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch)) + guard(spinlock_irqsave)(&ch->lock); + + if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch)) return -EIO; if (sysfs_streq(buf, "host")) @@ -380,7 +388,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr, { struct rcar_gen3_chan *ch = dev_get_drvdata(dev); - if (!ch->is_otg_channel || !rcar_gen3_is_any_rphy_initialized(ch)) + if (!ch->is_otg_channel || !rcar_gen3_is_any_otg_rphy_initialized(ch)) return -EIO; return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" : @@ -393,6 +401,9 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) void __iomem *usb2_base = ch->base; u32 val; + if (!ch->is_otg_channel || rcar_gen3_is_any_otg_rphy_initialized(ch)) + return; + /* Should not use functions of read-modify-write a register */ val = readl(usb2_base + USB2_LINECTRL1); val = (val & ~USB2_LINECTRL1_DP_RPD) | USB2_LINECTRL1_DPRPD_EN | @@ -406,7 +417,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) val = readl(usb2_base + USB2_ADPCTRL); writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL); } - msleep(20); + mdelay(20); writel(0xffffffff, usb2_base + USB2_OBINTSTA); writel(ch->obint_enable_bits, usb2_base + USB2_OBINTEN); @@ -418,16 +429,27 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch) { struct rcar_gen3_chan *ch = _ch; void __iomem *usb2_base = ch->base; - u32 status = readl(usb2_base + USB2_OBINTSTA); + struct device *dev = ch->dev; irqreturn_t ret = IRQ_NONE; + u32 status; - if (status & ch->obint_enable_bits) { - dev_vdbg(ch->dev, "%s: %08x\n", __func__, status); - writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA); - rcar_gen3_device_recognition(ch); - ret = IRQ_HANDLED; + pm_runtime_get_noresume(dev); + + if (pm_runtime_suspended(dev)) + goto rpm_put; + + scoped_guard(spinlock, &ch->lock) { + status = readl(usb2_base + USB2_OBINTSTA); + if (status & ch->obint_enable_bits) { + dev_vdbg(dev, "%s: %08x\n", __func__, status); + writel(ch->obint_enable_bits, usb2_base + USB2_OBINTSTA); + rcar_gen3_device_recognition(ch); + ret = IRQ_HANDLED; + } } +rpm_put: + pm_runtime_put_noidle(dev); return ret; } @@ -437,32 +459,23 @@ static int rcar_gen3_phy_usb2_init(struct phy *p) struct rcar_gen3_chan *channel = rphy->ch; void __iomem *usb2_base = channel->base; u32 val; - int ret; - if (!rcar_gen3_is_any_rphy_initialized(channel) && channel->irq >= 0) { - INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); - ret = request_irq(channel->irq, 
rcar_gen3_phy_usb2_irq, - IRQF_SHARED, dev_name(channel->dev), channel); - if (ret < 0) { - dev_err(channel->dev, "No irq handler (%d)\n", channel->irq); - return ret; - } - } + guard(spinlock_irqsave)(&channel->lock); /* Initialize USB2 part */ val = readl(usb2_base + USB2_INT_ENABLE); val |= USB2_INT_ENABLE_UCOM_INTEN | rphy->int_enable_bits; writel(val, usb2_base + USB2_INT_ENABLE); - writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET); - writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET); - /* Initialize otg part */ - if (channel->is_otg_channel) { - if (rcar_gen3_needs_init_otg(channel)) - rcar_gen3_init_otg(channel); - rphy->otg_initialized = true; + if (!rcar_gen3_is_any_rphy_initialized(channel)) { + writel(USB2_SPD_RSM_TIMSET_INIT, usb2_base + USB2_SPD_RSM_TIMSET); + writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET); } + /* Initialize otg part (only if we initialize a PHY with IRQs). */ + if (rphy->int_enable_bits) + rcar_gen3_init_otg(channel); + rphy->initialized = true; return 0; @@ -475,10 +488,9 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) void __iomem *usb2_base = channel->base; u32 val; - rphy->initialized = false; + guard(spinlock_irqsave)(&channel->lock); - if (channel->is_otg_channel) - rphy->otg_initialized = false; + rphy->initialized = false; val = readl(usb2_base + USB2_INT_ENABLE); val &= ~rphy->int_enable_bits; @@ -486,9 +498,6 @@ static int rcar_gen3_phy_usb2_exit(struct phy *p) val &= ~USB2_INT_ENABLE_UCOM_INTEN; writel(val, usb2_base + USB2_INT_ENABLE); - if (channel->irq >= 0 && !rcar_gen3_is_any_rphy_initialized(channel)) - free_irq(channel->irq, channel); - return 0; } @@ -500,16 +509,17 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p) u32 val; int ret = 0; - mutex_lock(&channel->lock); - if (!rcar_gen3_are_all_rphys_power_off(channel)) - goto out; - if (channel->vbus) { ret = regulator_enable(channel->vbus); if (ret) - goto out; + return ret; } + guard(spinlock_irqsave)(&channel->lock); + + if (!rcar_gen3_are_all_rphys_power_off(channel)) + goto out; + val = readl(usb2_base + USB2_USBCTR); val |= USB2_USBCTR_PLL_RST; writel(val, usb2_base + USB2_USBCTR); @@ -519,7 +529,6 @@ static int rcar_gen3_phy_usb2_power_on(struct phy *p) out: /* The powered flag should be set for any other phys anyway */ rphy->powered = true; - mutex_unlock(&channel->lock); return 0; } @@ -530,18 +539,20 @@ static int rcar_gen3_phy_usb2_power_off(struct phy *p) struct rcar_gen3_chan *channel = rphy->ch; int ret = 0; - mutex_lock(&channel->lock); - rphy->powered = false; + scoped_guard(spinlock_irqsave, &channel->lock) { + rphy->powered = false; - if (!rcar_gen3_are_all_rphys_power_off(channel)) - goto out; + if (rcar_gen3_are_all_rphys_power_off(channel)) { + u32 val = readl(channel->base + USB2_USBCTR); + + val |= USB2_USBCTR_PLL_RST; + writel(val, channel->base + USB2_USBCTR); + } + } if (channel->vbus) ret = regulator_disable(channel->vbus); -out: - mutex_unlock(&channel->lock); - return ret; } @@ -649,13 +660,42 @@ static enum usb_dr_mode rcar_gen3_get_dr_mode(struct device_node *np) return candidate; } +static int rcar_gen3_phy_usb2_init_bus(struct rcar_gen3_chan *channel) +{ + struct device *dev = channel->dev; + int ret; + u32 val; + + channel->rstc = devm_reset_control_array_get_shared(dev); + if (IS_ERR(channel->rstc)) + return PTR_ERR(channel->rstc); + + ret = pm_runtime_resume_and_get(dev); + if (ret) + return ret; + + ret = reset_control_deassert(channel->rstc); + if (ret) + goto rpm_put; + + val = readl(channel->base + 
USB2_AHB_BUS_CTR); + val &= ~USB2_AHB_BUS_CTR_MBL_MASK; + val |= USB2_AHB_BUS_CTR_MBL_INCR4; + writel(val, channel->base + USB2_AHB_BUS_CTR); + +rpm_put: + pm_runtime_put(dev); + + return ret; +} + static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) { const struct rcar_gen3_phy_drv_data *phy_data; struct device *dev = &pdev->dev; struct rcar_gen3_chan *channel; struct phy_provider *provider; - int ret = 0, i; + int ret = 0, i, irq; if (!dev->of_node) { dev_err(dev, "This driver needs device tree\n"); @@ -671,8 +711,6 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) return PTR_ERR(channel->base); channel->obint_enable_bits = USB2_OBINT_BITS; - /* get irq number here and request_irq for OTG in phy_init */ - channel->irq = platform_get_irq_optional(pdev, 0); channel->dr_mode = rcar_gen3_get_dr_mode(dev->of_node); if (channel->dr_mode != USB_DR_MODE_UNKNOWN) { channel->is_otg_channel = true; @@ -702,11 +740,20 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) goto error; } + platform_set_drvdata(pdev, channel); + channel->dev = dev; + + if (phy_data->init_bus) { + ret = rcar_gen3_phy_usb2_init_bus(channel); + if (ret) + goto error; + } + channel->soc_no_adp_ctrl = phy_data->no_adp_ctrl; if (phy_data->no_adp_ctrl) channel->obint_enable_bits = USB2_OBINT_IDCHG_EN; - mutex_init(&channel->lock); + spin_lock_init(&channel->lock); for (i = 0; i < NUM_OF_PHYS; i++) { channel->rphys[i].phy = devm_phy_create(dev, NULL, phy_data->phy_usb2_ops); @@ -729,8 +776,19 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev) channel->vbus = NULL; } - platform_set_drvdata(pdev, channel); - channel->dev = dev; + irq = platform_get_irq_optional(pdev, 0); + if (irq < 0 && irq != -ENXIO) { + ret = irq; + goto error; + } else if (irq > 0) { + INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work); + ret = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq, + IRQF_SHARED, dev_name(dev), channel); + if (ret < 0) { + dev_err(dev, "Failed to request irq (%d)\n", irq); + goto error; + } + } provider = devm_of_phy_provider_register(dev, rcar_gen3_phy_usb2_xlate); if (IS_ERR(provider)) { @@ -758,6 +816,7 @@ static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev) if (channel->is_otg_channel) device_remove_file(&pdev->dev, &dev_attr_role); + reset_control_assert(channel->rstc); pm_runtime_disable(&pdev->dev); return 0; diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index ee0848fe8432..ca6101ef6076 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -288,9 +288,9 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst) reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK; /* FSEL settings corresponding to reference clock */ - reg &= ~PHYCLKRST_FSEL_PIPE_MASK | - PHYCLKRST_MPLL_MULTIPLIER_MASK | - PHYCLKRST_SSC_REFCLKSEL_MASK; + reg &= ~(PHYCLKRST_FSEL_PIPE_MASK | + PHYCLKRST_MPLL_MULTIPLIER_MASK | + PHYCLKRST_SSC_REFCLKSEL_MASK); switch (phy_drd->extrefclk) { case EXYNOS5_FSEL_50MHZ: reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF | @@ -332,9 +332,9 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst) reg &= ~PHYCLKRST_REFCLKSEL_MASK; reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK; - reg &= ~PHYCLKRST_FSEL_UTMI_MASK | - PHYCLKRST_MPLL_MULTIPLIER_MASK | - PHYCLKRST_SSC_REFCLKSEL_MASK; + reg &= ~(PHYCLKRST_FSEL_UTMI_MASK | + PHYCLKRST_MPLL_MULTIPLIER_MASK | + PHYCLKRST_SSC_REFCLKSEL_MASK); reg |= PHYCLKRST_FSEL(phy_drd->extrefclk); return reg; diff --git 
a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c index 0996ede63387..0556a57f3ddf 100644 --- a/drivers/phy/tegra/xusb-tegra186.c +++ b/drivers/phy/tegra/xusb-tegra186.c @@ -900,6 +900,7 @@ static int tegra186_utmi_phy_exit(struct phy *phy) unsigned int index = lane->index; struct device *dev = padctl->dev; int err; + u32 reg; port = tegra_xusb_find_usb2_port(padctl, index); if (!port) { @@ -907,6 +908,16 @@ static int tegra186_utmi_phy_exit(struct phy *phy) return -ENODEV; } + if (port->mode == USB_DR_MODE_OTG || + port->mode == USB_DR_MODE_PERIPHERAL) { + /* reset VBUS&ID OVERRIDE */ + reg = padctl_readl(padctl, USB2_VBUS_ID); + reg &= ~VBUS_OVERRIDE; + reg &= ~ID_OVERRIDE(~0); + reg |= ID_OVERRIDE_FLOATING; + padctl_writel(padctl, reg, USB2_VBUS_ID); + } + if (port->supply && port->mode == USB_DR_MODE_HOST) { err = regulator_disable(port->supply); if (err) { diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index dc22b1dd2c8b..e3acd40e7cbc 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -542,16 +542,16 @@ static int tegra_xusb_port_init(struct tegra_xusb_port *port, err = dev_set_name(&port->dev, "%s-%u", name, index); if (err < 0) - goto unregister; + goto put_device; err = device_add(&port->dev); if (err < 0) - goto unregister; + goto put_device; return 0; -unregister: - device_unregister(&port->dev); +put_device: + put_device(&port->dev); return err; } diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c index fd52a83387ef..c313f0178957 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c @@ -69,7 +69,7 @@ static enum bcm281xx_pin_type hdmi_pin = BCM281XX_PIN_TYPE_HDMI; struct bcm281xx_pin_function { const char *name; const char * const *groups; - const unsigned ngroups; + const unsigned int ngroups; }; /* @@ -81,10 +81,10 @@ struct bcm281xx_pinctrl_data { /* List of all pins */ const struct pinctrl_pin_desc *pins; - const unsigned npins; + const unsigned int npins; const struct bcm281xx_pin_function *functions; - const unsigned nfunctions; + const unsigned int nfunctions; struct regmap *regmap; }; @@ -938,7 +938,7 @@ static struct bcm281xx_pinctrl_data bcm281xx_pinctrl = { }; static inline enum bcm281xx_pin_type pin_type_get(struct pinctrl_dev *pctldev, - unsigned pin) + unsigned int pin) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); @@ -971,7 +971,7 @@ static const struct regmap_config bcm281xx_pinctrl_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = BCM281XX_PIN_VC_CAM3_SDA, + .max_register = BCM281XX_PIN_VC_CAM3_SDA * 4, }; static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) @@ -982,7 +982,7 @@ static int bcm281xx_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) } static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev, - unsigned group) + unsigned int group) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); @@ -990,9 +990,9 @@ static const char *bcm281xx_pinctrl_get_group_name(struct pinctrl_dev *pctldev, } static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, - unsigned group, + unsigned int group, const unsigned **pins, - unsigned *num_pins) + unsigned int *num_pins) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); @@ -1004,7 +1004,7 @@ static int bcm281xx_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, static void bcm281xx_pinctrl_pin_dbg_show(struct 
pinctrl_dev *pctldev, struct seq_file *s, - unsigned offset) + unsigned int offset) { seq_printf(s, " %s", dev_name(pctldev->dev)); } @@ -1026,7 +1026,7 @@ static int bcm281xx_pinctrl_get_fcns_count(struct pinctrl_dev *pctldev) } static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev, - unsigned function) + unsigned int function) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); @@ -1034,9 +1034,9 @@ static const char *bcm281xx_pinctrl_get_fcn_name(struct pinctrl_dev *pctldev, } static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev, - unsigned function, + unsigned int function, const char * const **groups, - unsigned * const num_groups) + unsigned int * const num_groups) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); @@ -1047,8 +1047,8 @@ static int bcm281xx_pinctrl_get_fcn_groups(struct pinctrl_dev *pctldev, } static int bcm281xx_pinmux_set(struct pinctrl_dev *pctldev, - unsigned function, - unsigned group) + unsigned int function, + unsigned int group) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); const struct bcm281xx_pin_function *f = &pdata->functions[function]; @@ -1079,7 +1079,7 @@ static const struct pinmux_ops bcm281xx_pinctrl_pinmux_ops = { }; static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *config) { return -ENOTSUPP; @@ -1088,9 +1088,9 @@ static int bcm281xx_pinctrl_pin_config_get(struct pinctrl_dev *pctldev, /* Goes through the configs and update register val/mask */ static int bcm281xx_std_pin_update(struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *configs, - unsigned num_configs, + unsigned int num_configs, u32 *val, u32 *mask) { @@ -1204,9 +1204,9 @@ static const u16 bcm281xx_pullup_map[] = { /* Goes through the configs and update register val/mask */ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *configs, - unsigned num_configs, + unsigned int num_configs, u32 *val, u32 *mask) { @@ -1274,9 +1274,9 @@ static int bcm281xx_i2c_pin_update(struct pinctrl_dev *pctldev, /* Goes through the configs and update register val/mask */ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *configs, - unsigned num_configs, + unsigned int num_configs, u32 *val, u32 *mask) { @@ -1318,9 +1318,9 @@ static int bcm281xx_hdmi_pin_update(struct pinctrl_dev *pctldev, } static int bcm281xx_pinctrl_pin_config_set(struct pinctrl_dev *pctldev, - unsigned pin, + unsigned int pin, unsigned long *configs, - unsigned num_configs) + unsigned int num_configs) { struct bcm281xx_pinctrl_data *pdata = pinctrl_dev_get_drvdata(pctldev); enum bcm281xx_pin_type pin_type; diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 5ee746cb81f5..6520b88db110 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -143,10 +143,14 @@ static int dt_to_map_one_config(struct pinctrl *p, pctldev = get_pinctrl_dev_from_of_node(np_pctldev); if (pctldev) break; - /* Do not defer probing of hogs (circular loop) */ + /* + * Do not defer probing of hogs (circular loop) + * + * Return 1 to let the caller catch the case. 
+ */ if (np_pctldev == p->dev->of_node) { of_node_put(np_pctldev); - return -ENODEV; + return 1; } } of_node_put(np_pctldev); @@ -265,6 +269,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev) ret = dt_to_map_one_config(p, pctldev, statename, np_config); of_node_put(np_config); + if (ret == 1) + continue; if (ret < 0) goto err; } diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig index 7a32f77792d9..86110fc3c146 100644 --- a/drivers/pinctrl/freescale/Kconfig +++ b/drivers/pinctrl/freescale/Kconfig @@ -9,7 +9,7 @@ config PINCTRL_IMX config PINCTRL_IMX_SCU tristate - depends on IMX_SCU + depends on IMX_SCU || COMPILE_TEST select PINCTRL_IMX config PINCTRL_IMX1_CORE diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 530f3f934e19..1f05f7f1a9ae 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -487,7 +487,7 @@ static int meson_pinconf_get(struct pinctrl_dev *pcdev, unsigned int pin, case PIN_CONFIG_BIAS_PULL_DOWN: case PIN_CONFIG_BIAS_PULL_UP: if (meson_pinconf_get_pull(pc, pin) == param) - arg = 1; + arg = 60000; else return -EINVAL; break; diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c index 5abab6bc763a..f7c8ae980813 100644 --- a/drivers/pinctrl/pinctrl-cy8c95x0.c +++ b/drivers/pinctrl/pinctrl-cy8c95x0.c @@ -1234,7 +1234,7 @@ static int cy8c95x0_irq_setup(struct cy8c95x0_pinctrl *chip, int irq) ret = devm_request_threaded_irq(chip->dev, irq, NULL, cy8c95x0_irq_handler, - IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_HIGH, + IRQF_ONESHOT | IRQF_SHARED, dev_name(chip->dev), chip); if (ret) { dev_err(chip->dev, "failed to request irq %d\n", irq); diff --git a/drivers/pinctrl/pinctrl-k210.c b/drivers/pinctrl/pinctrl-k210.c index ad4db99094a7..e75c96c6b3da 100644 --- a/drivers/pinctrl/pinctrl-k210.c +++ b/drivers/pinctrl/pinctrl-k210.c @@ -181,7 +181,7 @@ static const u32 k210_pinconf_mode_id_to_mode[] = { [K210_PC_DEFAULT_INT13] = K210_PC_MODE_IN | K210_PC_PU, }; -#undef DEFAULT +#undef K210_PC_DEFAULT /* * Pin functions configuration information. diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 5f356edfd0fd..2e8bbef8ca34 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -85,6 +85,7 @@ const struct regmap_config mcp23x08_regmap = { .num_reg_defaults = ARRAY_SIZE(mcp23x08_defaults), .cache_type = REGCACHE_FLAT, .max_register = MCP_OLAT, + .disable_locking = true, /* mcp->lock protects the regmap */ }; EXPORT_SYMBOL_GPL(mcp23x08_regmap); @@ -131,6 +132,7 @@ const struct regmap_config mcp23x17_regmap = { .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults), .cache_type = REGCACHE_FLAT, .val_format_endian = REGMAP_ENDIAN_LITTLE, + .disable_locking = true, /* mcp->lock protects the regmap */ }; EXPORT_SYMBOL_GPL(mcp23x17_regmap); @@ -228,7 +230,9 @@ static int mcp_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin, switch (param) { case PIN_CONFIG_BIAS_PULL_UP: + mutex_lock(&mcp->lock); ret = mcp_read(mcp, MCP_GPPU, &data); + mutex_unlock(&mcp->lock); if (ret < 0) return ret; status = (data & BIT(pin)) ? 
1 : 0; @@ -257,7 +261,9 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, switch (param) { case PIN_CONFIG_BIAS_PULL_UP: + mutex_lock(&mcp->lock); ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg); + mutex_unlock(&mcp->lock); break; default: dev_dbg(mcp->dev, "Invalid config param %04x\n", param); diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c index c98f35ad8921..5a6be66c1a0b 100644 --- a/drivers/pinctrl/pinctrl-zynqmp.c +++ b/drivers/pinctrl/pinctrl-zynqmp.c @@ -45,7 +45,6 @@ * @name: Name of the pin mux function * @groups: List of pin groups for this function * @ngroups: Number of entries in @groups - * @node: Firmware node matching with the function * * This structure holds information about pin control function * and function group names supporting that function. diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 8bf8b21954fe..7d25d95e156c 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -1006,8 +1006,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) struct msm_pinctrl *pctrl = gpiochip_get_data(gc); const struct msm_pingroup *g; unsigned long flags; - bool was_enabled; - u32 val; + u32 val, oldval; if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) { set_bit(d->hwirq, pctrl->dual_edge_irqs); @@ -1067,8 +1066,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) * internal circuitry of TLMM, toggling the RAW_STATUS * could cause the INTR_STATUS to be set for EDGE interrupts. */ - val = msm_readl_intr_cfg(pctrl, g); - was_enabled = val & BIT(g->intr_raw_status_bit); + val = oldval = msm_readl_intr_cfg(pctrl, g); val |= BIT(g->intr_raw_status_bit); if (g->intr_detection_width == 2) { val &= ~(3 << g->intr_detection_bit); @@ -1121,9 +1119,11 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) /* * The first time we set RAW_STATUS_EN it could trigger an interrupt. * Clear the interrupt. This is safe because we have - * IRQCHIP_SET_TYPE_MASKED. + * IRQCHIP_SET_TYPE_MASKED. When changing the interrupt type, we could + * also still have a non-matching interrupt latched, so clear whenever + * making changes to the interrupt configuration. 
*/ - if (!was_enabled) + if (val != oldval) msm_ack_intr_status(pctrl, g); if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index 6d43c2123e69..63255cd35bd7 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -665,7 +665,7 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev, "push-pull", "open-drain", "open-source" }; static const char *const strengths[] = { - "no", "high", "medium", "low" + "no", "low", "medium", "high" }; pad = pctldev->desc->pins[pin].drv_data; @@ -1219,6 +1219,8 @@ static const struct of_device_id pmic_gpio_of_match[] = { { .compatible = "qcom,pm8350c-gpio", .data = (void *) 9 }, { .compatible = "qcom,pm8450-gpio", .data = (void *) 4 }, { .compatible = "qcom,pm8916-gpio", .data = (void *) 4 }, + /* pm8937 has 8 GPIOs with holes on 3, 4 and 6 */ + { .compatible = "qcom,pm8937-gpio", .data = (void *) 8 }, { .compatible = "qcom,pm8941-gpio", .data = (void *) 36 }, /* pm8950 has 8 GPIOs with holes on 3 */ { .compatible = "qcom,pm8950-gpio", .data = (void *) 8 }, diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c index 6937157f50b3..148cc107b7af 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c @@ -964,6 +964,7 @@ static const struct of_device_id pmic_mpp_of_match[] = { { .compatible = "qcom,pm8226-mpp", .data = (void *) 8 }, { .compatible = "qcom,pm8841-mpp", .data = (void *) 4 }, { .compatible = "qcom,pm8916-mpp", .data = (void *) 4 }, + { .compatible = "qcom,pm8937-mpp", .data = (void *) 4 }, { .compatible = "qcom,pm8941-mpp", .data = (void *) 8 }, { .compatible = "qcom,pm8950-mpp", .data = (void *) 4 }, { .compatible = "qcom,pmi8950-mpp", .data = (void *) 4 }, diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c index 12126e30dc20..6f0d3fda61c2 100644 --- a/drivers/pinctrl/renesas/pinctrl-rza2.c +++ b/drivers/pinctrl/renesas/pinctrl-rza2.c @@ -242,6 +242,9 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv) int ret; chip.label = devm_kasprintf(priv->dev, GFP_KERNEL, "%pOFn", np); + if (!chip.label) + return -ENOMEM; + chip.parent = priv->dev; chip.ngpio = priv->npins; @@ -252,6 +255,8 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv) return ret; } + of_node_put(of_args.np); + if ((of_args.args[0] != 0) || (of_args.args[1] != 0) || (of_args.args[2] != priv->npins)) { diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index 159812fe1c97..1304ab0bcac1 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -1365,6 +1365,8 @@ static int rzg2l_gpio_register(struct rzg2l_pinctrl *pctrl) return ret; } + of_node_put(of_args.np); + if (of_args.args[0] != 0 || of_args.args[1] != 0 || of_args.args[2] != pctrl->data->n_port_pins) { dev_err(pctrl->dev, "gpio-ranges does not match selected SOC\n"); diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c index 2858800288bb..dc4299c03b99 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c +++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c @@ -943,6 +943,8 @@ static int rzv2m_gpio_register(struct rzv2m_pinctrl *pctrl) return ret; } + of_node_put(of_args.np); + if (of_args.args[0] != 0 || of_args.args[1] != 0 || of_args.args[2] != pctrl->data->n_port_pins) { dev_err(pctrl->dev, "gpio-ranges does 
not match selected SOC\n"); diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index bd13b5ef246d..0022ee0851fa 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -1149,7 +1149,7 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) ret = platform_get_irq_optional(pdev, 0); if (ret < 0 && ret != -ENXIO) - return ret; + goto err_put_banks; if (ret > 0) drvdata->irq = ret; diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index e198233c10ba..4a3f5f5b966d 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -85,7 +85,6 @@ struct stm32_pinctrl_group { struct stm32_gpio_bank { void __iomem *base; - struct clk *clk; struct reset_control *rstc; spinlock_t lock; struct gpio_chip gpio_chip; @@ -107,6 +106,7 @@ struct stm32_pinctrl { unsigned ngroups; const char **grp_names; struct stm32_gpio_bank *banks; + struct clk_bulk_data *clks; unsigned nbanks; const struct stm32_pinctrl_match_data *match_data; struct irq_domain *domain; @@ -1273,6 +1273,30 @@ static const struct pinconf_ops stm32_pconf_ops = { .pin_config_dbg_show = stm32_pconf_dbg_show, }; +static struct stm32_desc_pin *stm32_pctrl_get_desc_pin_from_gpio(struct stm32_pinctrl *pctl, + struct stm32_gpio_bank *bank, + unsigned int offset) +{ + unsigned int stm32_pin_nb = bank->bank_nr * STM32_GPIO_PINS_PER_BANK + offset; + struct stm32_desc_pin *pin_desc; + int i; + + /* With few exceptions (e.g. bank 'Z'), pin number matches with pin index in array */ + if (stm32_pin_nb < pctl->npins) { + pin_desc = pctl->pins + stm32_pin_nb; + if (pin_desc->pin.number == stm32_pin_nb) + return pin_desc; + } + + /* Otherwise, loop all array to find the pin with the right number */ + for (i = 0; i < pctl->npins; i++) { + pin_desc = pctl->pins + i; + if (pin_desc->pin.number == stm32_pin_nb) + return pin_desc; + } + return NULL; +} + static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode_handle *fwnode) { struct stm32_gpio_bank *bank = &pctl->banks[pctl->nbanks]; @@ -1283,6 +1307,8 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode struct resource res; int npins = STM32_GPIO_PINS_PER_BANK; int bank_nr, err, i = 0; + struct stm32_desc_pin *stm32_pin; + char **names; if (!IS_ERR(bank->rstc)) reset_control_deassert(bank->rstc); @@ -1294,12 +1320,6 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode if (IS_ERR(bank->base)) return PTR_ERR(bank->base); - err = clk_prepare_enable(bank->clk); - if (err) { - dev_err(dev, "failed to prepare_enable clk (%d)\n", err); - return err; - } - bank->gpio_chip = stm32_gpio_template; fwnode_property_read_string(fwnode, "st,bank-name", &bank->gpio_chip.label); @@ -1346,24 +1366,35 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct fwnode bank->fwnode, &stm32_gpio_domain_ops, bank); - if (!bank->domain) { - err = -ENODEV; - goto err_clk; + if (!bank->domain) + return -ENODEV; + } + + names = devm_kcalloc(dev, npins, sizeof(char *), GFP_KERNEL); + if (!names) + return -ENOMEM; + + for (i = 0; i < npins; i++) { + stm32_pin = stm32_pctrl_get_desc_pin_from_gpio(pctl, bank, i); + if (stm32_pin && stm32_pin->pin.name) { + names[i] = devm_kasprintf(dev, GFP_KERNEL, "%s", stm32_pin->pin.name); + if (!names[i]) + return -ENOMEM; + } else { + names[i] = NULL; } } + bank->gpio_chip.names = (const char * const *)names; + err = 
gpiochip_add_data(&bank->gpio_chip, bank); if (err) { dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_nr); - goto err_clk; + return err; } dev_info(dev, "%s bank added\n", bank->gpio_chip.label); return 0; - -err_clk: - clk_disable_unprepare(bank->clk); - return err; } static struct irq_domain *stm32_pctrl_get_irq_domain(struct platform_device *pdev) @@ -1591,6 +1622,11 @@ int stm32_pctl_probe(struct platform_device *pdev) if (!pctl->banks) return -ENOMEM; + pctl->clks = devm_kcalloc(dev, banks, sizeof(*pctl->clks), + GFP_KERNEL); + if (!pctl->clks) + return -ENOMEM; + i = 0; for_each_gpiochip_node(dev, child) { struct stm32_gpio_bank *bank = &pctl->banks[i]; @@ -1602,24 +1638,27 @@ int stm32_pctl_probe(struct platform_device *pdev) return -EPROBE_DEFER; } - bank->clk = of_clk_get_by_name(np, NULL); - if (IS_ERR(bank->clk)) { + pctl->clks[i].clk = of_clk_get_by_name(np, NULL); + if (IS_ERR(pctl->clks[i].clk)) { fwnode_handle_put(child); - return dev_err_probe(dev, PTR_ERR(bank->clk), + return dev_err_probe(dev, PTR_ERR(pctl->clks[i].clk), "failed to get clk\n"); } + pctl->clks[i].id = "pctl"; i++; } + ret = clk_bulk_prepare_enable(banks, pctl->clks); + if (ret) { + dev_err(dev, "failed to prepare_enable clk (%d)\n", ret); + return ret; + } + for_each_gpiochip_node(dev, child) { ret = stm32_gpiolib_register_bank(pctl, child); if (ret) { fwnode_handle_put(child); - - for (i = 0; i < pctl->nbanks; i++) - clk_disable_unprepare(pctl->banks[i].clk); - - return ret; + goto err_register; } pctl->nbanks++; @@ -1628,6 +1667,15 @@ int stm32_pctl_probe(struct platform_device *pdev) dev_info(dev, "Pinctrl STM32 initialized\n"); return 0; +err_register: + for (i = 0; i < pctl->nbanks; i++) { + struct stm32_gpio_bank *bank = &pctl->banks[i]; + + gpiochip_remove(&bank->gpio_chip); + } + + clk_bulk_disable_unprepare(banks, pctl->clks); + return ret; } static int __maybe_unused stm32_pinctrl_restore_gpio_regs( @@ -1696,10 +1744,8 @@ static int __maybe_unused stm32_pinctrl_restore_gpio_regs( int __maybe_unused stm32_pinctrl_suspend(struct device *dev) { struct stm32_pinctrl *pctl = dev_get_drvdata(dev); - int i; - for (i = 0; i < pctl->nbanks; i++) - clk_disable(pctl->banks[i].clk); + clk_bulk_disable(pctl->nbanks, pctl->clks); return 0; } @@ -1708,10 +1754,11 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev) { struct stm32_pinctrl *pctl = dev_get_drvdata(dev); struct stm32_pinctrl_group *g = pctl->groups; - int i; + int i, ret; - for (i = 0; i < pctl->nbanks; i++) - clk_enable(pctl->banks[i].clk); + ret = clk_bulk_enable(pctl->nbanks, pctl->clks); + if (ret) + return ret; for (i = 0; i < pctl->ngroups; i++, g++) stm32_pinctrl_restore_gpio_regs(pctl, g->pin); diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c index 50bd26a30ac0..78de32666d56 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra.c @@ -270,13 +270,16 @@ static int tegra_pinctrl_set_mux(struct pinctrl_dev *pctldev, val = pmx_readl(pmx, g->mux_bank, g->mux_reg); val &= ~(0x3 << g->mux_bit); val |= i << g->mux_bit; + /* Set the SFIO/GPIO selection to SFIO when under pinmux control*/ + if (pmx->soc->sfsel_in_mux) + val |= (1 << g->sfsel_bit); pmx_writel(pmx, val, g->mux_bank, g->mux_reg); return 0; } -static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev, - unsigned int offset) +static int tegra_pinctrl_get_group_index(struct pinctrl_dev *pctldev, + unsigned int offset) { struct tegra_pmx *pmx = 
pinctrl_dev_get_drvdata(pctldev); unsigned int group, num_pins, j; @@ -289,12 +292,35 @@ static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev * continue; for (j = 0; j < num_pins; j++) { if (offset == pins[j]) - return &pmx->soc->groups[group]; + return group; } } - dev_err(pctldev->dev, "Pingroup not found for pin %u\n", offset); - return NULL; + return -EINVAL; +} + +static const struct tegra_pingroup *tegra_pinctrl_get_group(struct pinctrl_dev *pctldev, + unsigned int offset, + int group_index) +{ + struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); + + if (group_index < 0 || group_index >= pmx->soc->ngroups) + return NULL; + + return &pmx->soc->groups[group_index]; +} + +static struct tegra_pingroup_config *tegra_pinctrl_get_group_config(struct pinctrl_dev *pctldev, + unsigned int offset, + int group_index) +{ + struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); + + if (group_index < 0) + return NULL; + + return &pmx->pingroup_configs[group_index]; } static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev, @@ -303,12 +329,15 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev, { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); const struct tegra_pingroup *group; + struct tegra_pingroup_config *config; + int group_index; u32 value; if (!pmx->soc->sfsel_in_mux) return 0; - group = tegra_pinctrl_get_group(pctldev, offset); + group_index = tegra_pinctrl_get_group_index(pctldev, offset); + group = tegra_pinctrl_get_group(pctldev, offset, group_index); if (!group) return -EINVAL; @@ -316,7 +345,11 @@ static int tegra_pinctrl_gpio_request_enable(struct pinctrl_dev *pctldev, if (group->mux_reg < 0 || group->sfsel_bit < 0) return -EINVAL; + config = tegra_pinctrl_get_group_config(pctldev, offset, group_index); + if (!config) + return -EINVAL; value = pmx_readl(pmx, group->mux_bank, group->mux_reg); + config->is_sfsel = (value & BIT(group->sfsel_bit)) != 0; value &= ~BIT(group->sfsel_bit); pmx_writel(pmx, value, group->mux_bank, group->mux_reg); @@ -329,12 +362,15 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev, { struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev); const struct tegra_pingroup *group; + struct tegra_pingroup_config *config; + int group_index; u32 value; if (!pmx->soc->sfsel_in_mux) return; - group = tegra_pinctrl_get_group(pctldev, offset); + group_index = tegra_pinctrl_get_group_index(pctldev, offset); + group = tegra_pinctrl_get_group(pctldev, offset, group_index); if (!group) return; @@ -342,8 +378,12 @@ static void tegra_pinctrl_gpio_disable_free(struct pinctrl_dev *pctldev, if (group->mux_reg < 0 || group->sfsel_bit < 0) return; + config = tegra_pinctrl_get_group_config(pctldev, offset, group_index); + if (!config) + return; value = pmx_readl(pmx, group->mux_bank, group->mux_reg); - value |= BIT(group->sfsel_bit); + if (config->is_sfsel) + value |= BIT(group->sfsel_bit); pmx_writel(pmx, value, group->mux_bank, group->mux_reg); } @@ -796,6 +836,12 @@ int tegra_pinctrl_probe(struct platform_device *pdev, pmx->dev = &pdev->dev; pmx->soc = soc_data; + pmx->pingroup_configs = devm_kcalloc(&pdev->dev, + pmx->soc->ngroups, sizeof(*pmx->pingroup_configs), + GFP_KERNEL); + if (!pmx->pingroup_configs) + return -ENOMEM; + /* * Each mux group will appear in 4 functions' list of groups. * This over-allocates slightly, since not all groups are mux groups. 
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.h b/drivers/pinctrl/tegra/pinctrl-tegra.h index f8269858eb78..ec5198d391ea 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra.h +++ b/drivers/pinctrl/tegra/pinctrl-tegra.h @@ -8,6 +8,10 @@ #ifndef __PINMUX_TEGRA_H__ #define __PINMUX_TEGRA_H__ +struct tegra_pingroup_config { + bool is_sfsel; +}; + struct tegra_pmx { struct device *dev; struct pinctrl_dev *pctl; @@ -18,6 +22,8 @@ struct tegra_pmx { int nbanks; void __iomem **regs; u32 *backup_regs; + /* Array of size soc->ngroups */ + struct tegra_pingroup_config *pingroup_configs; }; enum tegra_pinconf_param { diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c index a74d01e9089e..51b98f6c7b39 100644 --- a/drivers/platform/chrome/cros_ec_typec.c +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -446,6 +446,7 @@ static int cros_typec_init_ports(struct cros_typec_data *typec) return 0; unregister_ports: + fwnode_handle_put(fwnode); cros_unregister_ports(typec); return ret; } diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index ee67efdd5499..da765a7dedbc 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -88,6 +88,7 @@ enum acer_wmi_event_ids { WMID_HOTKEY_EVENT = 0x1, WMID_ACCEL_OR_KBD_DOCK_EVENT = 0x5, WMID_GAMING_TURBO_KEY_EVENT = 0x7, + WMID_AC_EVENT = 0x8, }; static const struct key_entry acer_wmi_keymap[] __initconst = { @@ -1999,6 +2000,9 @@ static void acer_wmi_notify(u32 value, void *context) if (return_value.key_num == 0x4) acer_toggle_turbo(); break; + case WMID_AC_EVENT: + /* We ignore AC events here */ + break; default: pr_warn("Unknown function number - %d - %d\n", return_value.function, return_value.key_num); diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c index f237c1ea8d35..8eaeb1e8f975 100644 --- a/drivers/platform/x86/amd/pmc.c +++ b/drivers/platform/x86/amd/pmc.c @@ -834,6 +834,10 @@ static int __maybe_unused amd_pmc_suspend_handler(struct device *dev) { struct amd_pmc_dev *pdev = dev_get_drvdata(dev); + /* + * Must be called only from the same set of dev_pm_ops handlers + * as i8042_pm_suspend() is called: currently just from .suspend. + */ if (pdev->cpu_id == AMD_CPU_ID_CZN) { int rc = amd_pmc_czn_wa_irq1(pdev); @@ -846,7 +850,9 @@ static int __maybe_unused amd_pmc_suspend_handler(struct device *dev) return 0; } -static SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL); +static const struct dev_pm_ops amd_pmc_pm = { + .suspend = amd_pmc_suspend_handler, +}; #endif diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 47b2f8bb6fb5..c60744d97d5b 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -427,11 +427,14 @@ static int asus_pega_lucid_set(struct asus_laptop *asus, int unit, bool enable) static int pega_acc_axis(struct asus_laptop *asus, int curr, char *method) { + unsigned long long val = (unsigned long long)curr; + acpi_status status; int i, delta; - unsigned long long val; - for (i = 0; i < PEGA_ACC_RETRIES; i++) { - acpi_evaluate_integer(asus->handle, method, NULL, &val); + for (i = 0; i < PEGA_ACC_RETRIES; i++) { + status = acpi_evaluate_integer(asus->handle, method, NULL, &val); + if (ACPI_FAILURE(status)) + continue; /* The output is noisy. 
From reading the ASL * dissassembly, timeout errors are returned with 1's * in the high word, and the lack of locking around diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index af3da303e2b1..cba515ce3444 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -590,6 +590,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0xC4, { KEY_KBDILLUMUP } }, { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } }, { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */ + { KE_IGNORE, 0xCF, }, /* AC mode */ { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */ { KE_KEY, 0xBD, { KEY_PROG2 } }, /* Lid flip action on ROG xflow laptops */ { KE_END, 0}, diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 296150eaef92..33eacb4fc4c4 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -3804,7 +3804,8 @@ static int asus_wmi_add(struct platform_device *pdev) goto fail_leds; asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result); - if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) + if ((result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) == + (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) asus->driver->wlan_ctrl_by_user = 1; if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) { diff --git a/drivers/platform/x86/dell/dell-smbios-base.c b/drivers/platform/x86/dell/dell-smbios-base.c index 6fb538a13868..9a9b9feac416 100644 --- a/drivers/platform/x86/dell/dell-smbios-base.c +++ b/drivers/platform/x86/dell/dell-smbios-base.c @@ -544,6 +544,7 @@ static int __init dell_smbios_init(void) int ret, wmi, smm; if (!dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL) && + !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Alienware", NULL) && !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "www.dell.com", NULL)) { pr_err("Unable to run on non-Dell system\n"); return -ENODEV; diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c index 24fd7ffadda9..841a5414d28a 100644 --- a/drivers/platform/x86/dell/dell-wmi-base.c +++ b/drivers/platform/x86/dell/dell-wmi-base.c @@ -80,6 +80,12 @@ static const struct dmi_system_id dell_wmi_smbios_list[] __initconst = { static const struct key_entry dell_wmi_keymap_type_0000[] = { { KE_IGNORE, 0x003a, { KEY_CAPSLOCK } }, + /* Meta key lock */ + { KE_IGNORE, 0xe000, { KEY_RIGHTMETA } }, + + /* Meta key unlock */ + { KE_IGNORE, 0xe001, { KEY_RIGHTMETA } }, + /* Key code is followed by brightness level */ { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } }, { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } }, diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c index 230e6ee96636..d8f1bf5e58a0 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c @@ -45,7 +45,7 @@ static ssize_t current_password_store(struct kobject *kobj, int length; length = strlen(buf); - if (buf[length-1] == '\n') + if (length && buf[length - 1] == '\n') length--; /* firmware does verifiation of min/max password length, diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index b543d117b12c..259eb2a2858f 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -17,13 +17,13 @@ /* * fujitsu-laptop.c - Fujitsu laptop support, providing access to additional * 
features made available on a range of Fujitsu laptops including the - * P2xxx/P5xxx/S6xxx/S7xxx series. + * P2xxx/P5xxx/S2xxx/S6xxx/S7xxx series. * * This driver implements a vendor-specific backlight control interface for * Fujitsu laptops and provides support for hotkeys present on certain Fujitsu * laptops. * - * This driver has been tested on a Fujitsu Lifebook S6410, S7020 and + * This driver has been tested on a Fujitsu Lifebook S2110, S6410, S7020 and * P8010. It should work on most P-series and S-series Lifebooks, but * YMMV. * @@ -102,7 +102,11 @@ #define KEY2_CODE 0x411 #define KEY3_CODE 0x412 #define KEY4_CODE 0x413 -#define KEY5_CODE 0x420 +#define KEY5_CODE 0x414 +#define KEY6_CODE 0x415 +#define KEY7_CODE 0x416 +#define KEY8_CODE 0x417 +#define KEY9_CODE 0x420 /* Hotkey ringbuffer limits */ #define MAX_HOTKEY_RINGBUFFER_SIZE 100 @@ -450,7 +454,7 @@ static const struct key_entry keymap_default[] = { { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, { KE_KEY, KEY3_CODE, { KEY_PROG3 } }, { KE_KEY, KEY4_CODE, { KEY_PROG4 } }, - { KE_KEY, KEY5_CODE, { KEY_RFKILL } }, + { KE_KEY, KEY9_CODE, { KEY_RFKILL } }, /* Soft keys read from status flags */ { KE_KEY, FLAG_RFKILL, { KEY_RFKILL } }, { KE_KEY, FLAG_TOUCHPAD_TOGGLE, { KEY_TOUCHPAD_TOGGLE } }, @@ -474,6 +478,18 @@ static const struct key_entry keymap_p8010[] = { { KE_END, 0 } }; +static const struct key_entry keymap_s2110[] = { + { KE_KEY, KEY1_CODE, { KEY_PROG1 } }, /* "A" */ + { KE_KEY, KEY2_CODE, { KEY_PROG2 } }, /* "B" */ + { KE_KEY, KEY3_CODE, { KEY_WWW } }, /* "Internet" */ + { KE_KEY, KEY4_CODE, { KEY_EMAIL } }, /* "E-mail" */ + { KE_KEY, KEY5_CODE, { KEY_STOPCD } }, + { KE_KEY, KEY6_CODE, { KEY_PLAYPAUSE } }, + { KE_KEY, KEY7_CODE, { KEY_PREVIOUSSONG } }, + { KE_KEY, KEY8_CODE, { KEY_NEXTSONG } }, + { KE_END, 0 } +}; + static const struct key_entry *keymap = keymap_default; static int fujitsu_laptop_dmi_keymap_override(const struct dmi_system_id *id) @@ -511,6 +527,15 @@ static const struct dmi_system_id fujitsu_laptop_dmi_table[] = { }, .driver_data = (void *)keymap_p8010 }, + { + .callback = fujitsu_laptop_dmi_keymap_override, + .ident = "Fujitsu LifeBook S2110", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK S2110"), + }, + .driver_data = (void *)keymap_s2110 + }, {} }; diff --git a/drivers/platform/x86/intel/bxtwc_tmu.c b/drivers/platform/x86/intel/bxtwc_tmu.c index 7ccf583649e6..3c9778366d93 100644 --- a/drivers/platform/x86/intel/bxtwc_tmu.c +++ b/drivers/platform/x86/intel/bxtwc_tmu.c @@ -48,9 +48,8 @@ static irqreturn_t bxt_wcove_tmu_irq_handler(int irq, void *data) static int bxt_wcove_tmu_probe(struct platform_device *pdev) { struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent); - struct regmap_irq_chip_data *regmap_irq_chip; struct wcove_tmu *wctmu; - int ret, virq, irq; + int ret; wctmu = devm_kzalloc(&pdev->dev, sizeof(*wctmu), GFP_KERNEL); if (!wctmu) @@ -59,27 +58,18 @@ static int bxt_wcove_tmu_probe(struct platform_device *pdev) wctmu->dev = &pdev->dev; wctmu->regmap = pmic->regmap; - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; + wctmu->irq = platform_get_irq(pdev, 0); + if (wctmu->irq < 0) + return wctmu->irq; - regmap_irq_chip = pmic->irq_chip_data_tmu; - virq = regmap_irq_get_virq(regmap_irq_chip, irq); - if (virq < 0) { - dev_err(&pdev->dev, - "failed to get virtual interrupt=%d\n", irq); - return virq; - } - - ret = devm_request_threaded_irq(&pdev->dev, virq, + ret = devm_request_threaded_irq(&pdev->dev, wctmu->irq, NULL, 
bxt_wcove_tmu_irq_handler, IRQF_ONESHOT, "bxt_wcove_tmu", wctmu); if (ret) { dev_err(&pdev->dev, "request irq failed: %d,virq: %d\n", - ret, virq); + ret, wctmu->irq); return ret; } - wctmu->irq = virq; /* Unmask TMU second level Wake & System alarm */ regmap_update_bits(wctmu->regmap, BXTWC_MTMUIRQ_REG, diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index b96ef0eb82af..c13837401c26 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -109,6 +109,13 @@ static const struct dmi_system_id button_array_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"), }, }, + { + .ident = "Microsoft Surface Go 4", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 4"), + }, + }, { } }; diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c index c42c3faa2c32..0f16436e5804 100644 --- a/drivers/platform/x86/intel/int3472/discrete.c +++ b/drivers/platform/x86/intel/int3472/discrete.c @@ -359,6 +359,9 @@ static int skl_int3472_discrete_probe(struct platform_device *pdev) struct int3472_cldb cldb; int ret; + if (!adev) + return -ENODEV; + ret = skl_int3472_fill_cldb(adev, &cldb); if (ret) { dev_err(&pdev->dev, "Couldn't fill CLDB structure\n"); diff --git a/drivers/platform/x86/intel/int3472/tps68470.c b/drivers/platform/x86/intel/int3472/tps68470.c index 5b8d1a9620a5..82fb2fbc1000 100644 --- a/drivers/platform/x86/intel/int3472/tps68470.c +++ b/drivers/platform/x86/intel/int3472/tps68470.c @@ -152,6 +152,9 @@ static int skl_int3472_tps68470_probe(struct i2c_client *client) int ret; int i; + if (!adev) + return -ENODEV; + n_consumers = skl_int3472_fill_clk_pdata(&client->dev, &clk_pdata); if (n_consumers < 0) return n_consumers; diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index 380e36953e31..595db17ca129 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -77,7 +77,7 @@ static DECLARE_HASHTABLE(isst_hash, 8); static DEFINE_MUTEX(isst_hash_lock); static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param, - u32 data) + u64 data) { struct isst_cmd *sst_cmd; diff --git a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c index 00ac7e381441..0be3087e21d5 100644 --- a/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c +++ b/drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c @@ -121,15 +121,13 @@ static int uncore_event_cpu_online(unsigned int cpu) { struct uncore_data *data; int target; + int ret; /* Check if there is an online cpu in the package for uncore MSR */ target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu)); if (target < nr_cpu_ids) return 0; - /* Use this CPU on this die as a control CPU */ - cpumask_set_cpu(cpu, &uncore_cpu_mask); - data = uncore_get_instance(cpu); if (!data) return 0; @@ -137,7 +135,14 @@ static int uncore_event_cpu_online(unsigned int cpu) data->package_id = topology_physical_package_id(cpu); data->die_id = topology_die_id(cpu); - return uncore_freq_add_entry(data, cpu); + ret = uncore_freq_add_entry(data, cpu); + if (ret) + return ret; + + /* Use this CPU on this die as a control CPU */ + cpumask_set_cpu(cpu, &uncore_cpu_mask); + + return 0; } static int uncore_event_cpu_offline(unsigned int 
cpu) diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c index 053be5c5e0ca..eff920de31c2 100644 --- a/drivers/platform/x86/p2sb.c +++ b/drivers/platform/x86/p2sb.c @@ -42,8 +42,9 @@ struct p2sb_res_cache { }; static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE]; +static bool p2sb_hidden_by_bios; -static int p2sb_get_devfn(unsigned int *devfn) +static void p2sb_get_devfn(unsigned int *devfn) { unsigned int fn = P2SB_DEVFN_DEFAULT; const struct x86_cpu_id *id; @@ -53,7 +54,6 @@ static int p2sb_get_devfn(unsigned int *devfn) fn = (unsigned int)id->driver_data; *devfn = fn; - return 0; } static bool p2sb_valid_resource(const struct resource *res) @@ -97,6 +97,12 @@ static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn) static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn) { + /* + * The BIOS prevents the P2SB device from being enumerated by the PCI + * subsystem, so we need to unhide and hide it back to lookup the BAR. + */ + pci_bus_write_config_dword(bus, devfn, P2SBC, 0); + /* Scan the P2SB device and cache its BAR0 */ p2sb_scan_and_cache_devfn(bus, devfn); @@ -104,6 +110,8 @@ static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn) if (devfn == P2SB_DEVFN_GOLDMONT) p2sb_scan_and_cache_devfn(bus, SPI_DEVFN_GOLDMONT); + pci_bus_write_config_dword(bus, devfn, P2SBC, P2SBC_HIDE); + if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res)) return -ENOENT; @@ -129,12 +137,10 @@ static int p2sb_cache_resources(void) u32 value = P2SBC_HIDE; struct pci_bus *bus; u16 class; - int ret; + int ret = 0; /* Get devfn for P2SB device itself */ - ret = p2sb_get_devfn(&devfn_p2sb); - if (ret) - return ret; + p2sb_get_devfn(&devfn_p2sb); bus = p2sb_get_bus(NULL); if (!bus) @@ -154,26 +160,57 @@ static int p2sb_cache_resources(void) */ pci_lock_rescan_remove(); - /* - * The BIOS prevents the P2SB device from being enumerated by the PCI - * subsystem, so we need to unhide and hide it back to lookup the BAR. - * Unhide the P2SB device here, if needed. - */ pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value); - if (value & P2SBC_HIDE) - pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0); + p2sb_hidden_by_bios = value & P2SBC_HIDE; - ret = p2sb_scan_and_cache(bus, devfn_p2sb); - - /* Hide the P2SB device, if it was hidden */ - if (value & P2SBC_HIDE) - pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, P2SBC_HIDE); + /* + * If the BIOS does not hide the P2SB device then its resources + * are accessible. Cache them only if the P2SB device is hidden. 
+ */ + if (p2sb_hidden_by_bios) + ret = p2sb_scan_and_cache(bus, devfn_p2sb); pci_unlock_rescan_remove(); return ret; } +static int p2sb_read_from_cache(struct pci_bus *bus, unsigned int devfn, + struct resource *mem) +{ + struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)]; + + if (cache->bus_dev_id != bus->dev.id) + return -ENODEV; + + if (!p2sb_valid_resource(&cache->res)) + return -ENOENT; + + memcpy(mem, &cache->res, sizeof(*mem)); + + return 0; +} + +static int p2sb_read_from_dev(struct pci_bus *bus, unsigned int devfn, + struct resource *mem) +{ + struct pci_dev *pdev; + int ret = 0; + + pdev = pci_get_slot(bus, devfn); + if (!pdev) + return -ENODEV; + + if (p2sb_valid_resource(pci_resource_n(pdev, 0))) + p2sb_read_bar0(pdev, mem); + else + ret = -ENOENT; + + pci_dev_put(pdev); + + return ret; +} + /** * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR * @bus: PCI bus to communicate with @@ -190,28 +227,17 @@ static int p2sb_cache_resources(void) */ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem) { - struct p2sb_res_cache *cache; - int ret; - bus = p2sb_get_bus(bus); if (!bus) return -ENODEV; - if (!devfn) { - ret = p2sb_get_devfn(&devfn); - if (ret) - return ret; - } + if (!devfn) + p2sb_get_devfn(&devfn); - cache = &p2sb_resources[PCI_FUNC(devfn)]; - if (cache->bus_dev_id != bus->dev.id) - return -ENODEV; + if (p2sb_hidden_by_bios) + return p2sb_read_from_cache(bus, devfn, mem); - if (!p2sb_valid_resource(&cache->res)) - return -ENOENT; - - memcpy(mem, &cache->res, sizeof(*mem)); - return 0; + return p2sb_read_from_dev(bus, devfn, mem); } EXPORT_SYMBOL_GPL(p2sb_bar); diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index ac7fb7a8fd59..e9bee5f6ec8d 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -602,8 +602,7 @@ static ssize_t eco_mode_show(struct device *dev, struct device_attribute *attr, result = 1; break; default: - result = -EIO; - break; + return -EIO; } return sysfs_emit(buf, "%u\n", result); } @@ -749,7 +748,12 @@ static ssize_t current_brightness_store(struct device *dev, struct device_attrib static ssize_t cdpower_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%d\n", get_optd_power_state()); + int state = get_optd_power_state(); + + if (state < 0) + return state; + + return sysfs_emit(buf, "%d\n", state); } static ssize_t cdpower_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index c2fb19af1070..17d74434e604 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -211,6 +211,7 @@ enum tpacpi_hkey_event_t { /* Thermal events */ TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */ TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */ + TP_HKEY_EV_ALARM_BAT_LIM_CHANGE = 0x6013, /* battery charge limit changed*/ TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */ TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */ TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* windows; thermal table changed */ @@ -3948,6 +3949,10 @@ static bool hotkey_notify_6xxx(const u32 hkey, pr_alert("THERMAL EMERGENCY: battery is extremely hot!\n"); /* recommended action: immediate sleep/hibernate */ break; + case TP_HKEY_EV_ALARM_BAT_LIM_CHANGE: + pr_debug("Battery Info: battery charge threshold changed\n"); + /* User changed charging 
threshold. No action needed */ + return true; case TP_HKEY_EV_ALARM_SENSOR_HOT: pr_crit("THERMAL ALARM: a sensor reports something is too hot!\n"); /* recommended action: warn user through gui, that */ @@ -8161,6 +8166,7 @@ static struct ibm_struct volume_driver_data = { #define FAN_NS_CTRL_STATUS BIT(2) /* Bit which determines control is enabled or not */ #define FAN_NS_CTRL BIT(4) /* Bit which determines control is by host or EC */ +#define FAN_CLOCK_TPM (22500*60) /* Ticks per minute for a 22.5 kHz clock */ enum { /* Fan control constants */ fan_status_offset = 0x2f, /* EC register 0x2f */ @@ -8213,6 +8219,8 @@ static u8 fan_control_resume_level; static int fan_watchdog_maxinterval; static bool fan_with_ns_addr; +static bool ecfw_with_fan_dec_rpm; +static bool fan_speed_in_tpr; static struct mutex fan_mutex; @@ -8395,8 +8403,11 @@ static int fan_get_speed(unsigned int *speed) !acpi_ec_read(fan_rpm_offset + 1, &hi))) return -EIO; - if (likely(speed)) + if (likely(speed)) { *speed = (hi << 8) | lo; + if (fan_speed_in_tpr && *speed != 0) + *speed = FAN_CLOCK_TPM / *speed; + } break; case TPACPI_FAN_RD_TPEC_NS: if (!acpi_ec_read(fan_rpm_status_ns, &lo)) @@ -8429,8 +8440,11 @@ static int fan2_get_speed(unsigned int *speed) if (rc) return -EIO; - if (likely(speed)) + if (likely(speed)) { *speed = (hi << 8) | lo; + if (fan_speed_in_tpr && *speed != 0) + *speed = FAN_CLOCK_TPM / *speed; + } break; case TPACPI_FAN_RD_TPEC_NS: @@ -8856,7 +8870,11 @@ static ssize_t fan_fan1_input_show(struct device *dev, if (res < 0) return res; - return sysfs_emit(buf, "%u\n", speed); + /* Check for fan speeds displayed in hexadecimal */ + if (!ecfw_with_fan_dec_rpm) + return sysfs_emit(buf, "%u\n", speed); + else + return sysfs_emit(buf, "%x\n", speed); } static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL); @@ -8873,7 +8891,11 @@ static ssize_t fan_fan2_input_show(struct device *dev, if (res < 0) return res; - return sysfs_emit(buf, "%u\n", speed); + /* Check for fan speeds displayed in hexadecimal */ + if (!ecfw_with_fan_dec_rpm) + return sysfs_emit(buf, "%u\n", speed); + else + return sysfs_emit(buf, "%x\n", speed); } static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL); @@ -8949,6 +8971,8 @@ static const struct attribute_group fan_driver_attr_group = { #define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */ #define TPACPI_FAN_NOFAN 0x0008 /* no fan available */ #define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */ +#define TPACPI_FAN_DECRPM 0x0020 /* For ECFW's with RPM in register as decimal */ +#define TPACPI_FAN_TPR 0x0040 /* Fan speed is in Ticks Per Revolution */ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1), @@ -8970,6 +8994,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_Q_LNV3('R', '1', 'F', TPACPI_FAN_NS), /* L13 Yoga Gen 2 */ TPACPI_Q_LNV3('N', '2', 'U', TPACPI_FAN_NS), /* X13 Yoga Gen 2*/ TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */ + TPACPI_Q_LNV3('R', '0', 'Q', TPACPI_FAN_DECRPM),/* L480 */ + TPACPI_Q_LNV('8', 'F', TPACPI_FAN_TPR), /* ThinkPad x120e */ }; static int __init fan_init(struct ibm_init_struct *iibm) @@ -9010,6 +9036,13 @@ static int __init fan_init(struct ibm_init_struct *iibm) tp_features.fan_ctrl_status_undef = 1; } + /* Check for the EC/BIOS with RPM reported in decimal*/ + if (quirks & TPACPI_FAN_DECRPM) { + pr_info("ECFW with fan RPM as decimal in EC register\n"); + ecfw_with_fan_dec_rpm = 1; + 
tp_features.fan_ctrl_status_undef = 1; + } + if (gfan_handle) { /* 570, 600e/x, 770e, 770x */ fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; @@ -9026,6 +9059,8 @@ static int __init fan_init(struct ibm_init_struct *iibm) if (quirks & TPACPI_FAN_Q1) fan_quirk1_setup(); + if (quirks & TPACPI_FAN_TPR) + fan_speed_in_tpr = true; /* Try and probe the 2nd fan */ tp_features.second_fan = 1; /* needed for get_speed to work */ res = fan2_get_speed(&speed); @@ -9221,7 +9256,11 @@ static int fan_read(struct seq_file *m) if (rc < 0) return rc; - seq_printf(m, "speed:\t\t%d\n", speed); + /* Check for fan speeds displayed in hexadecimal */ + if (!ecfw_with_fan_dec_rpm) + seq_printf(m, "speed:\t\t%d\n", speed); + else + seq_printf(m, "speed:\t\t%x\n", speed); if (fan_status_access_mode == TPACPI_FAN_RD_TPEC_NS) { /* @@ -10091,6 +10130,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = { * Individual addressing is broken on models that expose the * primary battery as BAT1. */ + TPACPI_Q_LNV('G', '8', true), /* ThinkPad X131e */ TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */ TPACPI_Q_LNV('J', '7', true), /* B5400 */ TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */ @@ -10450,6 +10490,10 @@ static struct ibm_struct proxsensor_driver_data = { #define DYTC_MODE_PSC_BALANCE 5 /* Default mode aka balanced */ #define DYTC_MODE_PSC_PERFORM 7 /* High power mode aka performance */ +#define DYTC_MODE_PSCV9_LOWPOWER 1 /* Low power mode */ +#define DYTC_MODE_PSCV9_BALANCE 3 /* Default mode aka balanced */ +#define DYTC_MODE_PSCV9_PERFORM 4 /* High power mode aka performance */ + #define DYTC_ERR_MASK 0xF /* Bits 0-3 in cmd result are the error result */ #define DYTC_ERR_SUCCESS 1 /* CMD completed successful */ @@ -10470,6 +10514,10 @@ static int dytc_capabilities; static bool dytc_mmc_get_available; static int profile_force; +static int platform_psc_profile_lowpower = DYTC_MODE_PSC_LOWPOWER; +static int platform_psc_profile_balanced = DYTC_MODE_PSC_BALANCE; +static int platform_psc_profile_performance = DYTC_MODE_PSC_PERFORM; + static int convert_dytc_to_profile(int funcmode, int dytcmode, enum platform_profile_option *profile) { @@ -10491,19 +10539,15 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode, } return 0; case DYTC_FUNCTION_PSC: - switch (dytcmode) { - case DYTC_MODE_PSC_LOWPOWER: + if (dytcmode == platform_psc_profile_lowpower) *profile = PLATFORM_PROFILE_LOW_POWER; - break; - case DYTC_MODE_PSC_BALANCE: + else if (dytcmode == platform_psc_profile_balanced) *profile = PLATFORM_PROFILE_BALANCED; - break; - case DYTC_MODE_PSC_PERFORM: + else if (dytcmode == platform_psc_profile_performance) *profile = PLATFORM_PROFILE_PERFORMANCE; - break; - default: /* Unknown mode */ + else return -EINVAL; - } + return 0; case DYTC_FUNCTION_AMT: /* For now return balanced. 
It's the closest we have to 'auto' */ @@ -10524,19 +10568,19 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe if (dytc_capabilities & BIT(DYTC_FC_MMC)) *perfmode = DYTC_MODE_MMC_LOWPOWER; else if (dytc_capabilities & BIT(DYTC_FC_PSC)) - *perfmode = DYTC_MODE_PSC_LOWPOWER; + *perfmode = platform_psc_profile_lowpower; break; case PLATFORM_PROFILE_BALANCED: if (dytc_capabilities & BIT(DYTC_FC_MMC)) *perfmode = DYTC_MODE_MMC_BALANCE; else if (dytc_capabilities & BIT(DYTC_FC_PSC)) - *perfmode = DYTC_MODE_PSC_BALANCE; + *perfmode = platform_psc_profile_balanced; break; case PLATFORM_PROFILE_PERFORMANCE: if (dytc_capabilities & BIT(DYTC_FC_MMC)) *perfmode = DYTC_MODE_MMC_PERFORM; else if (dytc_capabilities & BIT(DYTC_FC_PSC)) - *perfmode = DYTC_MODE_PSC_PERFORM; + *perfmode = platform_psc_profile_performance; break; default: /* Unknown profile */ return -EOPNOTSUPP; @@ -10725,6 +10769,7 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) if (output & BIT(DYTC_QUERY_ENABLE_BIT)) dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF; + dbg_printk(TPACPI_DBG_INIT, "DYTC version %d\n", dytc_version); /* Check DYTC is enabled and supports mode setting */ if (dytc_version < 5) return -ENODEV; @@ -10763,6 +10808,11 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) } } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */ pr_debug("PSC is supported\n"); + if (dytc_version >= 9) { /* update profiles for DYTC 9 and up */ + platform_psc_profile_lowpower = DYTC_MODE_PSCV9_LOWPOWER; + platform_psc_profile_balanced = DYTC_MODE_PSCV9_BALANCE; + platform_psc_profile_performance = DYTC_MODE_PSCV9_PERFORM; + } } else { dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n"); return -ENODEV; @@ -11472,6 +11522,8 @@ static int __must_check __init get_thinkpad_model_data( tp->vendor = PCI_VENDOR_ID_IBM; else if (dmi_name_in_vendors("LENOVO")) tp->vendor = PCI_VENDOR_ID_LENOVO; + else if (dmi_name_in_vendors("NEC")) + tp->vendor = PCI_VENDOR_ID_LENOVO; else return 0; diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 4a5371a3a531..2868dcf3f96d 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -449,9 +449,29 @@ static u8 [BQ27XXX_REG_AP] = 0x18, BQ27XXX_DM_REG_ROWS, }, + bq27426_regs[BQ27XXX_REG_MAX] = { + [BQ27XXX_REG_CTRL] = 0x00, + [BQ27XXX_REG_TEMP] = 0x02, + [BQ27XXX_REG_INT_TEMP] = 0x1e, + [BQ27XXX_REG_VOLT] = 0x04, + [BQ27XXX_REG_AI] = 0x10, + [BQ27XXX_REG_FLAGS] = 0x06, + [BQ27XXX_REG_TTE] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTF] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTES] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR, + [BQ27XXX_REG_NAC] = 0x08, + [BQ27XXX_REG_RC] = 0x0c, + [BQ27XXX_REG_FCC] = 0x0e, + [BQ27XXX_REG_CYCT] = INVALID_REG_ADDR, + [BQ27XXX_REG_AE] = INVALID_REG_ADDR, + [BQ27XXX_REG_SOC] = 0x1c, + [BQ27XXX_REG_DCAP] = INVALID_REG_ADDR, + [BQ27XXX_REG_AP] = 0x18, + BQ27XXX_DM_REG_ROWS, + }, #define bq27411_regs bq27421_regs #define bq27425_regs bq27421_regs -#define bq27426_regs bq27421_regs #define bq27441_regs bq27421_regs #define bq27621_regs bq27421_regs bq27z561_regs[BQ27XXX_REG_MAX] = { @@ -769,10 +789,23 @@ static enum power_supply_property bq27421_props[] = { }; #define bq27411_props bq27421_props #define bq27425_props bq27421_props -#define bq27426_props bq27421_props #define bq27441_props bq27421_props #define bq27621_props bq27421_props +static enum power_supply_property bq27426_props[] = { + 
POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_NOW, + POWER_SUPPLY_PROP_MANUFACTURER, +}; + static enum power_supply_property bq27z561_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, diff --git a/drivers/power/supply/da9150-fg.c b/drivers/power/supply/da9150-fg.c index 8c5e2c49d6c1..9d0a6ab698f5 100644 --- a/drivers/power/supply/da9150-fg.c +++ b/drivers/power/supply/da9150-fg.c @@ -248,9 +248,9 @@ static int da9150_fg_current_avg(struct da9150_fg *fg, DA9150_QIF_SD_GAIN_SIZE); da9150_fg_read_sync_end(fg); - div = (u64) (sd_gain * shunt_val * 65536ULL); + div = 65536ULL * sd_gain * shunt_val; do_div(div, 1000000); - res = (u64) (iavg * 1000000ULL); + res = 1000000ULL * iavg; do_div(res, div); val->intval = (int) res; diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c index 68212b39785b..6139f736ecbe 100644 --- a/drivers/power/supply/gpio-charger.c +++ b/drivers/power/supply/gpio-charger.c @@ -67,6 +67,14 @@ static int set_charge_current_limit(struct gpio_charger *gpio_charger, int val) if (gpio_charger->current_limit_map[i].limit_ua <= val) break; } + + /* + * If a valid charge current limit isn't found, default to smallest + * current limitation for safety reasons. + */ + if (i >= gpio_charger->current_limit_map_size) + i = gpio_charger->current_limit_map_size - 1; + mapping = gpio_charger->current_limit_map[i]; for (i = 0; i < ndescs; i++) { diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c index a2c5c9858639..ef3482fa4023 100644 --- a/drivers/power/supply/max77693_charger.c +++ b/drivers/power/supply/max77693_charger.c @@ -556,7 +556,7 @@ static int max77693_set_charge_input_threshold_volt(struct max77693_charger *chg case 4700000: case 4800000: case 4900000: - data = (uvolt - 4700000) / 100000; + data = ((uvolt - 4700000) / 100000) + 1; break; default: dev_err(chg->dev, "Wrong value for charge input voltage regulation threshold\n"); diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index ac88c9636b66..13801dfa5c38 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -480,8 +480,6 @@ EXPORT_SYMBOL_GPL(power_supply_get_by_name); */ void power_supply_put(struct power_supply *psy) { - might_sleep(); - atomic_dec(&psy->use_cnt); put_device(&psy->dev); } diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index ff736b006198..fd475e463d1f 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -626,8 +626,7 @@ struct powercap_control_type *powercap_register_control_type( dev_set_name(&control_type->dev, "%s", name); result = device_register(&control_type->dev); if (result) { - if (control_type->allocated) - kfree(control_type); + put_device(&control_type->dev); return ERR_PTR(result); } idr_init(&control_type->idr); diff --git a/drivers/pps/clients/pps-gpio.c b/drivers/pps/clients/pps-gpio.c index 2f4b11b4dfcd..bf3b6f1aa984 100644 --- a/drivers/pps/clients/pps-gpio.c +++ b/drivers/pps/clients/pps-gpio.c @@ -214,8 +214,8 @@ static int pps_gpio_probe(struct platform_device *pdev) return -EINVAL; } - dev_info(data->pps->dev, "Registered IRQ %d as PPS source\n", - data->irq); + 
dev_dbg(&data->pps->dev, "Registered IRQ %d as PPS source\n", + data->irq); return 0; } diff --git a/drivers/pps/clients/pps-ktimer.c b/drivers/pps/clients/pps-ktimer.c index d33106bd7a29..2f465549b843 100644 --- a/drivers/pps/clients/pps-ktimer.c +++ b/drivers/pps/clients/pps-ktimer.c @@ -56,7 +56,7 @@ static struct pps_source_info pps_ktimer_info = { static void __exit pps_ktimer_exit(void) { - dev_info(pps->dev, "ktimer PPS source unregistered\n"); + dev_dbg(&pps->dev, "ktimer PPS source unregistered\n"); del_timer_sync(&ktimer); pps_unregister_source(pps); @@ -74,7 +74,7 @@ static int __init pps_ktimer_init(void) timer_setup(&ktimer, pps_ktimer_event, 0); mod_timer(&ktimer, jiffies + HZ); - dev_info(pps->dev, "ktimer PPS source registered\n"); + dev_dbg(&pps->dev, "ktimer PPS source registered\n"); return 0; } diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c index 443d6bae19d1..fa5660f3c4b7 100644 --- a/drivers/pps/clients/pps-ldisc.c +++ b/drivers/pps/clients/pps-ldisc.c @@ -32,7 +32,7 @@ static void pps_tty_dcd_change(struct tty_struct *tty, bool active) pps_event(pps, &ts, active ? PPS_CAPTUREASSERT : PPS_CAPTURECLEAR, NULL); - dev_dbg(pps->dev, "PPS %s at %lu\n", + dev_dbg(&pps->dev, "PPS %s at %lu\n", active ? "assert" : "clear", jiffies); } @@ -69,7 +69,7 @@ static int pps_tty_open(struct tty_struct *tty) goto err_unregister; } - dev_info(pps->dev, "source \"%s\" added\n", info.path); + dev_dbg(&pps->dev, "source \"%s\" added\n", info.path); return 0; @@ -89,7 +89,7 @@ static void pps_tty_close(struct tty_struct *tty) if (WARN_ON(!pps)) return; - dev_info(pps->dev, "removed\n"); + dev_info(&pps->dev, "removed\n"); pps_unregister_source(pps); } diff --git a/drivers/pps/clients/pps_parport.c b/drivers/pps/clients/pps_parport.c index 53e9c304ae0a..c3f46efd64c3 100644 --- a/drivers/pps/clients/pps_parport.c +++ b/drivers/pps/clients/pps_parport.c @@ -81,7 +81,7 @@ static void parport_irq(void *handle) /* check the signal (no signal means the pulse is lost this time) */ if (!signal_is_set(port)) { local_irq_restore(flags); - dev_err(dev->pps->dev, "lost the signal\n"); + dev_err(&dev->pps->dev, "lost the signal\n"); goto out_assert; } @@ -98,7 +98,7 @@ static void parport_irq(void *handle) /* timeout */ dev->cw_err++; if (dev->cw_err >= CLEAR_WAIT_MAX_ERRORS) { - dev_err(dev->pps->dev, "disabled clear edge capture after %d" + dev_err(&dev->pps->dev, "disabled clear edge capture after %d" " timeouts\n", dev->cw_err); dev->cw = 0; dev->cw_err = 0; diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c index d9d566f70ed1..92d1b62ea239 100644 --- a/drivers/pps/kapi.c +++ b/drivers/pps/kapi.c @@ -41,7 +41,7 @@ static void pps_add_offset(struct pps_ktime *ts, struct pps_ktime *offset) static void pps_echo_client_default(struct pps_device *pps, int event, void *data) { - dev_info(pps->dev, "echo %s %s\n", + dev_info(&pps->dev, "echo %s %s\n", event & PPS_CAPTUREASSERT ? "assert" : "", event & PPS_CAPTURECLEAR ? 
"clear" : ""); } @@ -112,7 +112,7 @@ struct pps_device *pps_register_source(struct pps_source_info *info, goto kfree_pps; } - dev_info(pps->dev, "new PPS source %s\n", info->name); + dev_dbg(&pps->dev, "new PPS source %s\n", info->name); return pps; @@ -166,7 +166,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, /* check event type */ BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); - dev_dbg(pps->dev, "PPS event at %lld.%09ld\n", + dev_dbg(&pps->dev, "PPS event at %lld.%09ld\n", (s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec); timespec_to_pps_ktime(&ts_real, ts->ts_real); @@ -188,7 +188,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, /* Save the time stamp */ pps->assert_tu = ts_real; pps->assert_sequence++; - dev_dbg(pps->dev, "capture assert seq #%u\n", + dev_dbg(&pps->dev, "capture assert seq #%u\n", pps->assert_sequence); captured = ~0; @@ -202,7 +202,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, /* Save the time stamp */ pps->clear_tu = ts_real; pps->clear_sequence++; - dev_dbg(pps->dev, "capture clear seq #%u\n", + dev_dbg(&pps->dev, "capture clear seq #%u\n", pps->clear_sequence); captured = ~0; diff --git a/drivers/pps/kc.c b/drivers/pps/kc.c index 50dc59af45be..fbd23295afd7 100644 --- a/drivers/pps/kc.c +++ b/drivers/pps/kc.c @@ -43,11 +43,11 @@ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args) pps_kc_hardpps_mode = 0; pps_kc_hardpps_dev = NULL; spin_unlock_irq(&pps_kc_hardpps_lock); - dev_info(pps->dev, "unbound kernel" + dev_info(&pps->dev, "unbound kernel" " consumer\n"); } else { spin_unlock_irq(&pps_kc_hardpps_lock); - dev_err(pps->dev, "selected kernel consumer" + dev_err(&pps->dev, "selected kernel consumer" " is not bound\n"); return -EINVAL; } @@ -57,11 +57,11 @@ int pps_kc_bind(struct pps_device *pps, struct pps_bind_args *bind_args) pps_kc_hardpps_mode = bind_args->edge; pps_kc_hardpps_dev = pps; spin_unlock_irq(&pps_kc_hardpps_lock); - dev_info(pps->dev, "bound kernel consumer: " + dev_info(&pps->dev, "bound kernel consumer: " "edge=0x%x\n", bind_args->edge); } else { spin_unlock_irq(&pps_kc_hardpps_lock); - dev_err(pps->dev, "another kernel consumer" + dev_err(&pps->dev, "another kernel consumer" " is already bound\n"); return -EINVAL; } @@ -83,7 +83,7 @@ void pps_kc_remove(struct pps_device *pps) pps_kc_hardpps_mode = 0; pps_kc_hardpps_dev = NULL; spin_unlock_irq(&pps_kc_hardpps_lock); - dev_info(pps->dev, "unbound kernel consumer" + dev_info(&pps->dev, "unbound kernel consumer" " on device removal\n"); } else spin_unlock_irq(&pps_kc_hardpps_lock); diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c index 22a65ad4e46e..2d008e0d116a 100644 --- a/drivers/pps/pps.c +++ b/drivers/pps/pps.c @@ -25,7 +25,7 @@ * Local variables */ -static dev_t pps_devt; +static int pps_major; static struct class *pps_class; static DEFINE_MUTEX(pps_idr_lock); @@ -62,7 +62,7 @@ static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata) else { unsigned long ticks; - dev_dbg(pps->dev, "timeout %lld.%09d\n", + dev_dbg(&pps->dev, "timeout %lld.%09d\n", (long long) fdata->timeout.sec, fdata->timeout.nsec); ticks = fdata->timeout.sec * HZ; @@ -80,7 +80,7 @@ static int pps_cdev_pps_fetch(struct pps_device *pps, struct pps_fdata *fdata) /* Check for pending signals */ if (err == -ERESTARTSYS) { - dev_dbg(pps->dev, "pending signal caught\n"); + dev_dbg(&pps->dev, "pending signal caught\n"); return -EINTR; } @@ -98,7 +98,7 @@ static long 
pps_cdev_ioctl(struct file *file, switch (cmd) { case PPS_GETPARAMS: - dev_dbg(pps->dev, "PPS_GETPARAMS\n"); + dev_dbg(&pps->dev, "PPS_GETPARAMS\n"); spin_lock_irq(&pps->lock); @@ -114,7 +114,7 @@ static long pps_cdev_ioctl(struct file *file, break; case PPS_SETPARAMS: - dev_dbg(pps->dev, "PPS_SETPARAMS\n"); + dev_dbg(&pps->dev, "PPS_SETPARAMS\n"); /* Check the capabilities */ if (!capable(CAP_SYS_TIME)) @@ -124,14 +124,14 @@ static long pps_cdev_ioctl(struct file *file, if (err) return -EFAULT; if (!(params.mode & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR))) { - dev_dbg(pps->dev, "capture mode unspecified (%x)\n", + dev_dbg(&pps->dev, "capture mode unspecified (%x)\n", params.mode); return -EINVAL; } /* Check for supported capabilities */ if ((params.mode & ~pps->info.mode) != 0) { - dev_dbg(pps->dev, "unsupported capabilities (%x)\n", + dev_dbg(&pps->dev, "unsupported capabilities (%x)\n", params.mode); return -EINVAL; } @@ -144,7 +144,7 @@ static long pps_cdev_ioctl(struct file *file, /* Restore the read only parameters */ if ((params.mode & (PPS_TSFMT_TSPEC | PPS_TSFMT_NTPFP)) == 0) { /* section 3.3 of RFC 2783 interpreted */ - dev_dbg(pps->dev, "time format unspecified (%x)\n", + dev_dbg(&pps->dev, "time format unspecified (%x)\n", params.mode); pps->params.mode |= PPS_TSFMT_TSPEC; } @@ -165,7 +165,7 @@ static long pps_cdev_ioctl(struct file *file, break; case PPS_GETCAP: - dev_dbg(pps->dev, "PPS_GETCAP\n"); + dev_dbg(&pps->dev, "PPS_GETCAP\n"); err = put_user(pps->info.mode, iuarg); if (err) @@ -176,7 +176,7 @@ static long pps_cdev_ioctl(struct file *file, case PPS_FETCH: { struct pps_fdata fdata; - dev_dbg(pps->dev, "PPS_FETCH\n"); + dev_dbg(&pps->dev, "PPS_FETCH\n"); err = copy_from_user(&fdata, uarg, sizeof(struct pps_fdata)); if (err) @@ -206,7 +206,7 @@ static long pps_cdev_ioctl(struct file *file, case PPS_KC_BIND: { struct pps_bind_args bind_args; - dev_dbg(pps->dev, "PPS_KC_BIND\n"); + dev_dbg(&pps->dev, "PPS_KC_BIND\n"); /* Check the capabilities */ if (!capable(CAP_SYS_TIME)) @@ -218,7 +218,7 @@ static long pps_cdev_ioctl(struct file *file, /* Check for supported capabilities */ if ((bind_args.edge & ~pps->info.mode) != 0) { - dev_err(pps->dev, "unsupported capabilities (%x)\n", + dev_err(&pps->dev, "unsupported capabilities (%x)\n", bind_args.edge); return -EINVAL; } @@ -227,7 +227,7 @@ static long pps_cdev_ioctl(struct file *file, if (bind_args.tsformat != PPS_TSFMT_TSPEC || (bind_args.edge & ~PPS_CAPTUREBOTH) != 0 || bind_args.consumer != PPS_KC_HARDPPS) { - dev_err(pps->dev, "invalid kernel consumer bind" + dev_err(&pps->dev, "invalid kernel consumer bind" " parameters (%x)\n", bind_args.edge); return -EINVAL; } @@ -259,7 +259,7 @@ static long pps_cdev_compat_ioctl(struct file *file, struct pps_fdata fdata; int err; - dev_dbg(pps->dev, "PPS_FETCH\n"); + dev_dbg(&pps->dev, "PPS_FETCH\n"); err = copy_from_user(&compat, uarg, sizeof(struct pps_fdata_compat)); if (err) @@ -296,20 +296,36 @@ static long pps_cdev_compat_ioctl(struct file *file, #define pps_cdev_compat_ioctl NULL #endif +static struct pps_device *pps_idr_get(unsigned long id) +{ + struct pps_device *pps; + + mutex_lock(&pps_idr_lock); + pps = idr_find(&pps_idr, id); + if (pps) + get_device(&pps->dev); + + mutex_unlock(&pps_idr_lock); + return pps; +} + static int pps_cdev_open(struct inode *inode, struct file *file) { - struct pps_device *pps = container_of(inode->i_cdev, - struct pps_device, cdev); + struct pps_device *pps = pps_idr_get(iminor(inode)); + + if (!pps) + return -ENODEV; + file->private_data = pps; - 
kobject_get(&pps->dev->kobj); return 0; } static int pps_cdev_release(struct inode *inode, struct file *file) { - struct pps_device *pps = container_of(inode->i_cdev, - struct pps_device, cdev); - kobject_put(&pps->dev->kobj); + struct pps_device *pps = file->private_data; + + WARN_ON(pps->id != iminor(inode)); + put_device(&pps->dev); return 0; } @@ -332,22 +348,13 @@ static void pps_device_destruct(struct device *dev) { struct pps_device *pps = dev_get_drvdata(dev); - cdev_del(&pps->cdev); - - /* Now we can release the ID for re-use */ pr_debug("deallocating pps%d\n", pps->id); - mutex_lock(&pps_idr_lock); - idr_remove(&pps_idr, pps->id); - mutex_unlock(&pps_idr_lock); - - kfree(dev); kfree(pps); } int pps_register_cdev(struct pps_device *pps) { int err; - dev_t devt; mutex_lock(&pps_idr_lock); /* @@ -364,40 +371,29 @@ int pps_register_cdev(struct pps_device *pps) goto out_unlock; } pps->id = err; - mutex_unlock(&pps_idr_lock); - devt = MKDEV(MAJOR(pps_devt), pps->id); - - cdev_init(&pps->cdev, &pps_cdev_fops); - pps->cdev.owner = pps->info.owner; - - err = cdev_add(&pps->cdev, devt, 1); - if (err) { - pr_err("%s: failed to add char device %d:%d\n", - pps->info.name, MAJOR(pps_devt), pps->id); + pps->dev.class = pps_class; + pps->dev.parent = pps->info.dev; + pps->dev.devt = MKDEV(pps_major, pps->id); + dev_set_drvdata(&pps->dev, pps); + dev_set_name(&pps->dev, "pps%d", pps->id); + err = device_register(&pps->dev); + if (err) goto free_idr; - } - pps->dev = device_create(pps_class, pps->info.dev, devt, pps, - "pps%d", pps->id); - if (IS_ERR(pps->dev)) { - err = PTR_ERR(pps->dev); - goto del_cdev; - } /* Override the release function with our own */ - pps->dev->release = pps_device_destruct; + pps->dev.release = pps_device_destruct; - pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, - MAJOR(pps_devt), pps->id); + pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, pps_major, + pps->id); + get_device(&pps->dev); + mutex_unlock(&pps_idr_lock); return 0; -del_cdev: - cdev_del(&pps->cdev); - free_idr: - mutex_lock(&pps_idr_lock); idr_remove(&pps_idr, pps->id); + put_device(&pps->dev); out_unlock: mutex_unlock(&pps_idr_lock); return err; @@ -407,7 +403,13 @@ void pps_unregister_cdev(struct pps_device *pps) { pr_debug("unregistering pps%d\n", pps->id); pps->lookup_cookie = NULL; - device_destroy(pps_class, pps->dev->devt); + device_destroy(pps_class, pps->dev.devt); + + /* Now we can release the ID for re-use */ + mutex_lock(&pps_idr_lock); + idr_remove(&pps_idr, pps->id); + put_device(&pps->dev); + mutex_unlock(&pps_idr_lock); } /* @@ -427,6 +429,11 @@ void pps_unregister_cdev(struct pps_device *pps) * so that it will not be used again, even if the pps device cannot * be removed from the idr due to pending references holding the minor * number in use. + * + * Since pps_idr holds a reference to the device, the returned + * pps_device is guaranteed to be valid until pps_unregister_cdev() is + * called on it. But after calling pps_unregister_cdev(), it may be + * freed at any time. 
*/ struct pps_device *pps_lookup_dev(void const *cookie) { @@ -449,13 +456,11 @@ EXPORT_SYMBOL(pps_lookup_dev); static void __exit pps_exit(void) { class_destroy(pps_class); - unregister_chrdev_region(pps_devt, PPS_MAX_SOURCES); + __unregister_chrdev(pps_major, 0, PPS_MAX_SOURCES, "pps"); } static int __init pps_init(void) { - int err; - pps_class = class_create(THIS_MODULE, "pps"); if (IS_ERR(pps_class)) { pr_err("failed to allocate class\n"); @@ -463,8 +468,9 @@ static int __init pps_init(void) } pps_class->dev_groups = pps_groups; - err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps"); - if (err < 0) { + pps_major = __register_chrdev(0, 0, PPS_MAX_SOURCES, "pps", + &pps_cdev_fops); + if (pps_major < 0) { pr_err("failed to allocate char device region\n"); goto remove_class; } @@ -477,8 +483,7 @@ static int __init pps_init(void) remove_class: class_destroy(pps_class); - - return err; + return pps_major; } subsys_initcall(pps_init); diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 8eb902fe73a9..6b3600356797 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -4,6 +4,7 @@ * * Copyright (C) 2010 OMICRON electronics GmbH */ +#include #include #include #include @@ -124,6 +125,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) struct timespec64 ts; int enable, err = 0; + if (in_compat_syscall() && cmd != PTP_ENABLE_PPS && cmd != PTP_ENABLE_PPS2) + arg = (unsigned long)compat_ptr(arg); + switch (cmd) { case PTP_CLOCK_GETCAPS: diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 3c3e4fbefeba..af66ed48dc00 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -136,7 +136,8 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) err = ops->adjfine(ops, tx->freq); else err = ops->adjfreq(ops, ppb); - ptp->dialed_frequency = tx->freq; + if (!err) + ptp->dialed_frequency = tx->freq; } else if (tx->modes & ADJ_OFFSET) { if (ops->adjphase) { s32 offset = tx->offset; @@ -187,6 +188,11 @@ static int ptp_getcycles64(struct ptp_clock_info *info, struct timespec64 *ts) return info->gettime64(info, ts); } +static int ptp_enable(struct ptp_clock_info *ptp, struct ptp_clock_request *request, int on) +{ + return -EOPNOTSUPP; +} + static void ptp_aux_kworker(struct kthread_work *work) { struct ptp_clock *ptp = container_of(work, struct ptp_clock, @@ -249,6 +255,9 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, ptp->info->getcrosscycles = ptp->info->getcrosststamp; } + if (!ptp->info->enable) + ptp->info->enable = ptp_enable; + if (ptp->info->do_aux_work) { kthread_init_delayed_work(&ptp->aux_work, ptp_aux_kworker); ptp->kworker = kthread_create_worker(0, "ptp%d", ptp->index); diff --git a/drivers/ptp/ptp_dte.c b/drivers/ptp/ptp_dte.c index 8641fd060491..7cc5a00e625b 100644 --- a/drivers/ptp/ptp_dte.c +++ b/drivers/ptp/ptp_dte.c @@ -134,8 +134,9 @@ static s64 dte_read_nco_with_ovf(struct ptp_dte *ptp_dte) return ns; } -static int ptp_dte_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +static int ptp_dte_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) { + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); u32 nco_incr; unsigned long flags; struct ptp_dte *ptp_dte = container_of(ptp, struct ptp_dte, caps); @@ -219,7 +220,7 @@ static const struct ptp_clock_info ptp_dte_caps = { .n_ext_ts = 0, .n_pins = 0, .pps = 0, - .adjfreq = ptp_dte_adjfreq, + .adjfine = ptp_dte_adjfine, .adjtime = ptp_dte_adjtime, .gettime64 = ptp_dte_gettime, .settime64 = ptp_dte_settime, 
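
The ptp_dte conversion above replaces .adjfreq with .adjfine, so the driver now receives the frequency offset as scaled parts per million (a value with a 16-bit binary fractional field) and converts it with scaled_ppm_to_ppb(). A minimal userspace sketch of that scaling, assuming the usual ppb = scaled_ppm * 1000 / 2^16 relationship; the in-kernel helper only differs by a small rounding term.

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the kernel's scaled_ppm_to_ppb() scaling:
 * ppb = scaled_ppm * 1000 / 2^16, computed as scaled_ppm * 125 / 2^13
 * to stay in integer arithmetic.
 */
static int64_t scaled_ppm_to_ppb(long scaled_ppm)
{
	int64_t ppb = scaled_ppm;

	ppb *= 125;
	ppb >>= 13;
	return ppb;
}

int main(void)
{
	/* +1 ppm arrives at .adjfine as 1 << 16 == 65536, i.e. roughly 1000 ppb */
	printf("%lld ppb\n", (long long)scaled_ppm_to_ppb(1 << 16));
	return 0;
}
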
diff --git a/drivers/ptp/ptp_kvm_arm.c b/drivers/ptp/ptp_kvm_arm.c index b7d28c8dfb84..e68e6943167b 100644 --- a/drivers/ptp/ptp_kvm_arm.c +++ b/drivers/ptp/ptp_kvm_arm.c @@ -22,6 +22,10 @@ int kvm_arch_ptp_init(void) return 0; } +void kvm_arch_ptp_exit(void) +{ +} + int kvm_arch_ptp_get_clock(struct timespec64 *ts) { return kvm_arch_ptp_get_crosststamp(NULL, ts, NULL); diff --git a/drivers/ptp/ptp_kvm_common.c b/drivers/ptp/ptp_kvm_common.c index fcae32f56f25..051114a59286 100644 --- a/drivers/ptp/ptp_kvm_common.c +++ b/drivers/ptp/ptp_kvm_common.c @@ -130,6 +130,7 @@ static struct kvm_ptp_clock kvm_ptp_clock; static void __exit ptp_kvm_exit(void) { ptp_clock_unregister(kvm_ptp_clock.ptp_clock); + kvm_arch_ptp_exit(); } static int __init ptp_kvm_init(void) diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c index 4991054a2135..5e5b2ef78547 100644 --- a/drivers/ptp/ptp_kvm_x86.c +++ b/drivers/ptp/ptp_kvm_x86.c @@ -14,27 +14,64 @@ #include #include #include +#include static phys_addr_t clock_pair_gpa; -static struct kvm_clock_pairing clock_pair; +static struct kvm_clock_pairing clock_pair_glbl; +static struct kvm_clock_pairing *clock_pair; int kvm_arch_ptp_init(void) { + struct page *p; long ret; if (!kvm_para_available()) - return -ENODEV; + return -EOPNOTSUPP; - clock_pair_gpa = slow_virt_to_phys(&clock_pair); - if (!pvclock_get_pvti_cpu0_va()) - return -ENODEV; + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { + p = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!p) + return -ENOMEM; + + clock_pair = page_address(p); + ret = set_memory_decrypted((unsigned long)clock_pair, 1); + if (ret) { + __free_page(p); + clock_pair = NULL; + goto nofree; + } + } else { + clock_pair = &clock_pair_glbl; + } + + clock_pair_gpa = slow_virt_to_phys(clock_pair); + if (!pvclock_get_pvti_cpu0_va()) { + ret = -EOPNOTSUPP; + goto err; + } ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa, KVM_CLOCK_PAIRING_WALLCLOCK); - if (ret == -KVM_ENOSYS) - return -ENODEV; + if (ret == -KVM_ENOSYS) { + ret = -EOPNOTSUPP; + goto err; + } return ret; + +err: + kvm_arch_ptp_exit(); +nofree: + return ret; +} + +void kvm_arch_ptp_exit(void) +{ + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { + WARN_ON(set_memory_encrypted((unsigned long)clock_pair, 1)); + free_page((unsigned long)clock_pair); + clock_pair = NULL; + } } int kvm_arch_ptp_get_clock(struct timespec64 *ts) @@ -49,8 +86,8 @@ int kvm_arch_ptp_get_clock(struct timespec64 *ts) return -EOPNOTSUPP; } - ts->tv_sec = clock_pair.sec; - ts->tv_nsec = clock_pair.nsec; + ts->tv_sec = clock_pair->sec; + ts->tv_nsec = clock_pair->nsec; return 0; } @@ -81,9 +118,9 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec, pr_err_ratelimited("clock pairing hypercall ret %lu\n", ret); return -EOPNOTSUPP; } - tspec->tv_sec = clock_pair.sec; - tspec->tv_nsec = clock_pair.nsec; - *cycle = __pvclock_read_cycles(src, clock_pair.tsc); + tspec->tv_sec = clock_pair->sec; + tspec->tv_nsec = clock_pair->nsec; + *cycle = __pvclock_read_cycles(src, clock_pair->tsc); } while (pvclock_read_retry(src, version)); *cs = &kvm_clock; diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c index 8fee9b330b61..b6f66a9886ce 100644 --- a/drivers/ptp/ptp_ocp.c +++ b/drivers/ptp/ptp_ocp.c @@ -1654,6 +1654,7 @@ ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s) if (!s->start) { /* roundup() does not work on 32-bit systems */ s->start = DIV64_U64_ROUND_UP(start_ns, s->period); + s->start *= s->period; s->start = ktime_add(s->start, s->phase); } 
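
The ptp_ocp_signal_set() hunk above fixes the periodic-signal start time: DIV64_U64_ROUND_UP() alone left s->start holding a period count rather than a time, so the added multiplication turns it back into nanoseconds, i.e. start_ns rounded up to the next multiple of the period. A small standalone sketch of that round-up, using plain division where the kernel must use DIV64_U64_ROUND_UP() because 32-bit kernels lack native 64-bit division:

#include <stdint.h>
#include <stdio.h>

/*
 * Round start_ns up to the next multiple of period: the
 * divide-then-multiply form of roundup() used by the fix above.
 */
static uint64_t round_up_to_period(uint64_t start_ns, uint64_t period)
{
	uint64_t periods = (start_ns + period - 1) / period; /* DIV64_U64_ROUND_UP */

	return periods * period;
}

int main(void)
{
	/* e.g. a 500000 ns period: 1000003 ns rounds up to 1500000 ns */
	printf("%llu\n", (unsigned long long)round_up_to_period(1000003, 500000));
	return 0;
}
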
@@ -3589,7 +3590,7 @@ ptp_ocp_complete(struct ptp_ocp *bp) pps = pps_lookup_dev(bp->ptp); if (pps) - ptp_ocp_symlink(bp, pps->dev, "pps"); + ptp_ocp_symlink(bp, &pps->dev, "pps"); ptp_ocp_debugfs_add_device(bp); diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c index 0247757f9a72..6751f3d00560 100644 --- a/drivers/pwm/pwm-fsl-ftm.c +++ b/drivers/pwm/pwm-fsl-ftm.c @@ -123,6 +123,9 @@ static unsigned int fsl_pwm_ticks_to_ns(struct fsl_pwm_chip *fpc, unsigned long long exval; rate = clk_get_rate(fpc->clk[fpc->period.clk_select]); + if (rate >> fpc->period.clk_ps == 0) + return 0; + exval = ticks; exval *= 1000000000UL; do_div(exval, rate >> fpc->period.clk_ps); @@ -195,6 +198,9 @@ static unsigned int fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc, unsigned int period = fpc->period.mod_period + 1; unsigned int period_ns = fsl_pwm_ticks_to_ns(fpc, period); + if (!period_ns) + return 0; + duty = (unsigned long long)duty_ns * period; do_div(duty, period_ns); diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c index 3a22c2fddc45..0cd42ae5c0a4 100644 --- a/drivers/pwm/pwm-imx27.c +++ b/drivers/pwm/pwm-imx27.c @@ -26,6 +26,7 @@ #define MX3_PWMSR 0x04 /* PWM Status Register */ #define MX3_PWMSAR 0x0C /* PWM Sample Register */ #define MX3_PWMPR 0x10 /* PWM Period Register */ +#define MX3_PWMCNR 0x14 /* PWM Counter Register */ #define MX3_PWMCR_FWM GENMASK(27, 26) #define MX3_PWMCR_STOPEN BIT(25) @@ -217,11 +218,13 @@ static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip, static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { - unsigned long period_cycles, duty_cycles, prescale; + unsigned long period_cycles, duty_cycles, prescale, period_us, tmp; struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip); struct pwm_state cstate; unsigned long long c; unsigned long long clkrate; + unsigned long flags; + int val; int ret; u32 cr; @@ -264,7 +267,98 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm, pwm_imx27_sw_reset(chip); } - writel(duty_cycles, imx->mmio_base + MX3_PWMSAR); + val = readl(imx->mmio_base + MX3_PWMPR); + val = val >= MX3_PWMPR_MAX ? MX3_PWMPR_MAX : val; + cr = readl(imx->mmio_base + MX3_PWMCR); + tmp = NSEC_PER_SEC * (u64)(val + 2) * MX3_PWMCR_PRESCALER_GET(cr); + tmp = DIV_ROUND_UP_ULL(tmp, clkrate); + period_us = DIV_ROUND_UP_ULL(tmp, 1000); + + /* + * ERR051198: + * PWM: PWM output may not function correctly if the FIFO is empty when + * a new SAR value is programmed + * + * Description: + * When the PWM FIFO is empty, a new value programmed to the PWM Sample + * register (PWM_PWMSAR) will be directly applied even if the current + * timer period has not expired. + * + * If the new SAMPLE value programmed in the PWM_PWMSAR register is + * less than the previous value, and the PWM counter register + * (PWM_PWMCNR) that contains the current COUNT value is greater than + * the new programmed SAMPLE value, the current period will not flip + * the level. This may result in an output pulse with a duty cycle of + * 100%. + * + * Consider a change from + * ________ + * / \______/ + * ^ * ^ + * to + * ____ + * / \__________/ + * ^ ^ + * At the time marked by *, the new write value will be directly applied + * to SAR even the current period is not over if FIFO is empty. + * + * ________ ____________________ + * / \______/ \__________/ + * ^ ^ * ^ ^ + * |<-- old SAR -->| |<-- new SAR -->| + * + * That is the output is active for a whole period. 
+ * + * Workaround: + * Check new SAR less than old SAR and current counter is in errata + * windows, write extra old SAR into FIFO and new SAR will effect at + * next period. + * + * Sometime period is quite long, such as over 1 second. If add old SAR + * into FIFO unconditional, new SAR have to wait for next period. It + * may be too long. + * + * Turn off the interrupt to ensure that not IRQ and schedule happen + * during above operations. If any irq and schedule happen, counter + * in PWM will be out of data and take wrong action. + * + * Add a safety margin 1.5us because it needs some time to complete + * IO write. + * + * Use writel_relaxed() to minimize the interval between two writes to + * the SAR register to increase the fastest PWM frequency supported. + * + * When the PWM period is longer than 2us(or <500kHz), this workaround + * can solve this problem. No software workaround is available if PWM + * period is shorter than IO write. Just try best to fill old data + * into FIFO. + */ + c = clkrate * 1500; + do_div(c, NSEC_PER_SEC); + + local_irq_save(flags); + val = FIELD_GET(MX3_PWMSR_FIFOAV, readl_relaxed(imx->mmio_base + MX3_PWMSR)); + + if (duty_cycles < imx->duty_cycle && (cr & MX3_PWMCR_EN)) { + if (period_us < 2) { /* 2us = 500 kHz */ + /* Best effort attempt to fix up >500 kHz case */ + udelay(3 * period_us); + writel_relaxed(imx->duty_cycle, imx->mmio_base + MX3_PWMSAR); + writel_relaxed(imx->duty_cycle, imx->mmio_base + MX3_PWMSAR); + } else if (val < MX3_PWMSR_FIFOAV_2WORDS) { + val = readl_relaxed(imx->mmio_base + MX3_PWMCNR); + /* + * If counter is close to period, controller may roll over when + * next IO write. + */ + if ((val + c >= duty_cycles && val < imx->duty_cycle) || + val + c >= period_cycles) + writel_relaxed(imx->duty_cycle, imx->mmio_base + MX3_PWMSAR); + } + } + writel_relaxed(duty_cycles, imx->mmio_base + MX3_PWMSAR); + local_irq_restore(flags); + writel(period_cycles, imx->mmio_base + MX3_PWMPR); /* diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index a337b47dc2f7..10c2ed23f551 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -120,21 +120,25 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_mediatek_chip *pc = to_pwm_mediatek_chip(chip); u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH, reg_thres = PWMTHRES; + unsigned long clk_rate; u64 resolution; int ret; ret = pwm_mediatek_clk_enable(chip, pwm); - if (ret < 0) return ret; + clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]); + if (!clk_rate) + return -EINVAL; + /* Make sure we use the bus clock and not the 26MHz clock */ if (pc->soc->has_ck_26m_sel) writel(0, pc->regs + PWM_CK_26M_SEL); /* Using resolution in picosecond gets accuracy higher */ resolution = (u64)NSEC_PER_SEC * 1000; - do_div(resolution, clk_get_rate(pc->clk_pwms[pwm->hwpwm])); + do_div(resolution, clk_rate); cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution); while (cnt_period > 8191) { diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c index 55f46d09602b..b2e75d870532 100644 --- a/drivers/pwm/pwm-rcar.c +++ b/drivers/pwm/pwm-rcar.c @@ -8,6 +8,7 @@ * - The hardware cannot generate a 0% duty cycle. 
*/ +#include #include #include #include @@ -103,23 +104,24 @@ static void rcar_pwm_set_clock_control(struct rcar_pwm_chip *rp, rcar_pwm_write(rp, value, RCAR_PWMCR); } -static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, int duty_ns, - int period_ns) +static int rcar_pwm_set_counter(struct rcar_pwm_chip *rp, int div, u64 duty_ns, + u64 period_ns) { - unsigned long long one_cycle, tmp; /* 0.01 nanoseconds */ + unsigned long long tmp; unsigned long clk_rate = clk_get_rate(rp->clk); u32 cyc, ph; - one_cycle = NSEC_PER_SEC * 100ULL << div; - do_div(one_cycle, clk_rate); + /* div <= 24 == RCAR_PWM_MAX_DIVISION, so the shift doesn't overflow. */ + tmp = mul_u64_u64_div_u64(period_ns, clk_rate, (u64)NSEC_PER_SEC << div); + if (tmp > FIELD_MAX(RCAR_PWMCNT_CYC0_MASK)) + tmp = FIELD_MAX(RCAR_PWMCNT_CYC0_MASK); - tmp = period_ns * 100ULL; - do_div(tmp, one_cycle); - cyc = (tmp << RCAR_PWMCNT_CYC0_SHIFT) & RCAR_PWMCNT_CYC0_MASK; + cyc = FIELD_PREP(RCAR_PWMCNT_CYC0_MASK, tmp); - tmp = duty_ns * 100ULL; - do_div(tmp, one_cycle); - ph = tmp & RCAR_PWMCNT_PH0_MASK; + tmp = mul_u64_u64_div_u64(duty_ns, clk_rate, (u64)NSEC_PER_SEC << div); + if (tmp > FIELD_MAX(RCAR_PWMCNT_PH0_MASK)) + tmp = FIELD_MAX(RCAR_PWMCNT_PH0_MASK); + ph = FIELD_PREP(RCAR_PWMCNT_PH0_MASK, tmp); /* Avoid prohibited setting */ if (cyc == 0 || ph == 0) diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c index 31a185c6b8da..7f477082db1d 100644 --- a/drivers/pwm/pwm-stm32-lp.c +++ b/drivers/pwm/pwm-stm32-lp.c @@ -169,8 +169,12 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip, regmap_read(priv->regmap, STM32_LPTIM_CR, &val); state->enabled = !!FIELD_GET(STM32_LPTIM_ENABLE, val); /* Keep PWM counter clock refcount in sync with PWM initial state */ - if (state->enabled) - clk_enable(priv->clk); + if (state->enabled) { + int ret = clk_enable(priv->clk); + + if (ret) + return ret; + } regmap_read(priv->regmap, STM32_LPTIM_CFGR, &val); presc = FIELD_GET(STM32_LPTIM_PRESC, val); diff --git a/drivers/pwm/pwm-stm32.c b/drivers/pwm/pwm-stm32.c index 2070d107c632..fda7d76f08b1 100644 --- a/drivers/pwm/pwm-stm32.c +++ b/drivers/pwm/pwm-stm32.c @@ -631,8 +631,11 @@ static int stm32_pwm_probe(struct platform_device *pdev) priv->chip.npwm = stm32_pwm_detect_channels(priv, &num_enabled); /* Initialize clock refcount to number of enabled PWM channels. 
*/ - for (i = 0; i < num_enabled; i++) - clk_enable(priv->clk); + for (i = 0; i < num_enabled; i++) { + ret = clk_enable(priv->clk); + if (ret) + return ret; + } ret = pwmchip_add(&priv->chip); if (ret < 0) diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 43db495f1986..bfd5d3ccdce2 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -1740,7 +1740,8 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, err = rio_add_net(net); if (err) { rmcd_debug(RDEV, "failed to register net, err=%d", err); - kfree(net); + put_device(&net->dev); + mport->net = NULL; goto cleanup; } } diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index fdcf742b2adb..c12941f71e2c 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -871,7 +871,10 @@ static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport, dev_set_name(&net->dev, "rnet_%d", net->id); net->dev.parent = &mport->dev; net->dev.release = rio_scan_release_dev; - rio_add_net(net); + if (rio_add_net(net)) { + put_device(&net->dev); + net = NULL; + } } return net; diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c index 75f432f61e91..f4d6e62bd963 100644 --- a/drivers/regulator/ad5398.c +++ b/drivers/regulator/ad5398.c @@ -14,6 +14,7 @@ #include #include #include +#include #define AD5398_CURRENT_EN_MASK 0x8000 @@ -221,15 +222,20 @@ static int ad5398_probe(struct i2c_client *client, const struct ad5398_current_data_format *df = (struct ad5398_current_data_format *)id->driver_data; - if (!init_data) - return -EINVAL; - chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; config.dev = &client->dev; + if (client->dev.of_node) + init_data = of_get_regulator_init_data(&client->dev, + client->dev.of_node, + &ad5398_reg); + if (!init_data) + return -EINVAL; + config.init_data = init_data; + config.of_node = client->dev.of_node; config.driver_data = chip; chip->client = client; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 3bdf9a7b13ac..357fbe297784 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -2097,6 +2097,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) if (have_full_constraints()) { r = dummy_regulator_rdev; + if (!r) { + ret = -EPROBE_DEFER; + goto out; + } get_device(&r->dev); } else { dev_err(dev, "Failed to resolve %s-supply for %s\n", @@ -2114,6 +2118,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) goto out; } r = dummy_regulator_rdev; + if (!r) { + ret = -EPROBE_DEFER; + goto out; + } get_device(&r->dev); } @@ -2222,8 +2230,10 @@ struct regulator *_regulator_get(struct device *dev, const char *id, * enabled, even if it isn't hooked up, and just * provide a dummy. 
*/ - dev_warn(dev, "supply %s not found, using dummy regulator\n", id); rdev = dummy_regulator_rdev; + if (!rdev) + return ERR_PTR(-EPROBE_DEFER); + dev_warn(dev, "supply %s not found, using dummy regulator\n", id); get_device(&rdev->dev); break; @@ -4903,7 +4913,7 @@ int regulator_bulk_get(struct device *dev, int num_consumers, consumers[i].supply); if (IS_ERR(consumers[i].consumer)) { ret = dev_err_probe(dev, PTR_ERR(consumers[i].consumer), - "Failed to get supply '%s'", + "Failed to get supply '%s'\n", consumers[i].supply); consumers[i].consumer = NULL; goto err; diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c index b8bf76c170fe..332fb58f9095 100644 --- a/drivers/regulator/max20086-regulator.c +++ b/drivers/regulator/max20086-regulator.c @@ -133,7 +133,7 @@ static int max20086_regulators_register(struct max20086 *chip) static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) { - struct of_regulator_match matches[MAX20086_MAX_REGULATORS] = { }; + struct of_regulator_match *matches; struct device_node *node; unsigned int i; int ret; @@ -144,6 +144,11 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) return -ENODEV; } + matches = devm_kcalloc(chip->dev, chip->info->num_outputs, + sizeof(*matches), GFP_KERNEL); + if (!matches) + return -ENOMEM; + for (i = 0; i < chip->info->num_outputs; ++i) matches[i].name = max20086_output_names[i]; diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c index 59e71fd0db43..f23c12f4ffbf 100644 --- a/drivers/regulator/of_regulator.c +++ b/drivers/regulator/of_regulator.c @@ -435,7 +435,7 @@ int of_regulator_match(struct device *dev, struct device_node *node, "failed to parse DT for regulator %pOFn\n", child); of_node_put(child); - return -EINVAL; + goto err_put; } match->of_node = of_node_get(child); count++; @@ -444,6 +444,18 @@ int of_regulator_match(struct device *dev, struct device_node *node, } return count; + +err_put: + for (i = 0; i < num_matches; i++) { + struct of_regulator_match *match = &matches[i]; + + match->init_data = NULL; + if (match->of_node) { + of_node_put(match->of_node); + match->of_node = NULL; + } + } + return -EINVAL; } EXPORT_SYMBOL_GPL(of_regulator_match); diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 784d9244f7c1..7af6c8c10939 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -1209,6 +1209,8 @@ static const struct regulator_desc rk809_reg[] = { .n_linear_ranges = ARRAY_SIZE(rk817_buck1_voltage_ranges), .vsel_reg = RK817_BUCK3_ON_VSEL_REG, .vsel_mask = RK817_BUCK_VSEL_MASK, + .apply_reg = RK817_POWER_CONFIG, + .apply_bit = RK817_BUCK3_FB_RES_INTER, .enable_reg = RK817_POWER_EN_REG(0), .enable_mask = ENABLE_MASK(RK817_ID_DCDC3), .enable_val = BIT(RK817_ID_DCDC3), diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 7dbab5fcbe1e..5c9acc55b08a 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -1118,6 +1118,9 @@ static int q6v5_mba_load(struct q6v5 *qproc) goto disable_active_clks; } + if (qproc->has_mba_logs) + qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE); + writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); if (qproc->dp_size) { writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG); @@ -1128,9 +1131,6 @@ static int q6v5_mba_load(struct q6v5 *qproc) if (ret) goto reclaim_mba; - if 
(qproc->has_mba_logs) - qcom_pil_info_store("mba", qproc->mba_phys, MBA_LOG_SIZE); - ret = q6v5_rmb_mba_wait(qproc, 0, 5000); if (ret == -ETIMEDOUT) { dev_err(qproc->dev, "MBA boot timed out\n"); @@ -1786,6 +1786,13 @@ static int q6v5_pds_attach(struct device *dev, struct device **devs, while (pd_names[num_pds]) num_pds++; + /* Handle single power domain */ + if (num_pds == 1 && dev->pm_domain) { + devs[0] = dev; + pm_runtime_enable(dev); + return 1; + } + for (i = 0; i < num_pds; i++) { devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); if (IS_ERR_OR_NULL(devs[i])) { @@ -1806,8 +1813,15 @@ unroll_attach: static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds, size_t pd_count) { + struct device *dev = qproc->dev; int i; + /* Handle single power domain */ + if (pd_count == 1 && dev->pm_domain) { + pm_runtime_disable(dev); + return; + } + for (i = 0; i < pd_count; i++) dev_pm_domain_detach(pds[i], false); } @@ -2313,13 +2327,13 @@ static const struct rproc_hexagon_res msm8974_mss = { .supply = "pll", .uA = 100000, }, - {} - }, - .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { { .supply = "mx", .uV = 1050000, }, + {} + }, + .fallback_proxy_supply = (struct qcom_mss_reg_res[]) { { .supply = "cx", .uA = 100000, @@ -2345,7 +2359,6 @@ static const struct rproc_hexagon_res msm8974_mss = { NULL }, .proxy_pd_names = (char*[]){ - "mx", "cx", NULL }, diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index dc6f07ca8341..679e89db85a0 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -37,7 +37,6 @@ struct adsp_data { const char *firmware_name; int pas_id; unsigned int minidump_id; - bool has_aggre2_clk; bool auto_boot; bool decrypt_shutdown; @@ -68,7 +67,6 @@ struct qcom_adsp { int pas_id; unsigned int minidump_id; int crash_reason_smem; - bool has_aggre2_clk; bool decrypt_shutdown; const char *info_name; @@ -345,15 +343,13 @@ static int adsp_init_clock(struct qcom_adsp *adsp) return ret; } - if (adsp->has_aggre2_clk) { - adsp->aggre2_clk = devm_clk_get(adsp->dev, "aggre2"); - if (IS_ERR(adsp->aggre2_clk)) { - ret = PTR_ERR(adsp->aggre2_clk); - if (ret != -EPROBE_DEFER) - dev_err(adsp->dev, - "failed to get aggre2 clock"); - return ret; - } + adsp->aggre2_clk = devm_clk_get_optional(adsp->dev, "aggre2"); + if (IS_ERR(adsp->aggre2_clk)) { + ret = PTR_ERR(adsp->aggre2_clk); + if (ret != -EPROBE_DEFER) + dev_err(adsp->dev, + "failed to get aggre2 clock"); + return ret; } return 0; @@ -393,16 +389,16 @@ static int adsp_pds_attach(struct device *dev, struct device **devs, if (!pd_names) return 0; + while (pd_names[num_pds]) + num_pds++; + /* Handle single power domain */ - if (dev->pm_domain) { + if (num_pds == 1 && dev->pm_domain) { devs[0] = dev; pm_runtime_enable(dev); return 1; } - while (pd_names[num_pds]) - num_pds++; - for (i = 0; i < num_pds; i++) { devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]); if (IS_ERR_OR_NULL(devs[i])) { @@ -427,7 +423,7 @@ static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds, int i; /* Handle single power domain */ - if (dev->pm_domain && pd_count) { + if (pd_count == 1 && dev->pm_domain) { pm_runtime_disable(dev); return; } @@ -505,7 +501,6 @@ static int adsp_probe(struct platform_device *pdev) adsp->rproc = rproc; adsp->minidump_id = desc->minidump_id; adsp->pas_id = desc->pas_id; - adsp->has_aggre2_clk = desc->has_aggre2_clk; adsp->info_name = desc->sysmon_name; adsp->decrypt_shutdown = desc->decrypt_shutdown; platform_set_drvdata(pdev, adsp); 
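
The adsp_init_clock() change above drops the per-SoC has_aggre2_clk flag in favour of devm_clk_get_optional(), which returns NULL rather than an error when the clock is simply not described for the device, and the clk API treats a NULL clock as a no-op. A rough in-kernel sketch of that pattern, with hypothetical example_* names not taken from the driver:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Hypothetical helper illustrating the optional-clock pattern:
 * an absent "aggre2" clock yields NULL, and enabling NULL succeeds.
 */
static int example_get_aggre2_clk(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get_optional(dev, "aggre2");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "failed to get aggre2 clock\n");

	/* clk_prepare_enable(NULL) is a no-op and returns 0. */
	return clk_prepare_enable(*clk);
}
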
@@ -585,7 +580,6 @@ static const struct adsp_data adsp_resource_init = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, - .has_aggre2_clk = false, .auto_boot = true, .ssr_name = "lpass", .sysmon_name = "adsp", @@ -596,7 +590,6 @@ static const struct adsp_data sdm845_adsp_resource_init = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, - .has_aggre2_clk = false, .auto_boot = true, .load_state = "adsp", .ssr_name = "lpass", @@ -608,7 +601,6 @@ static const struct adsp_data sm6350_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "lcx", @@ -625,7 +617,6 @@ static const struct adsp_data sm8150_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "cx", @@ -641,7 +632,7 @@ static const struct adsp_data sm8250_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, - .has_aggre2_clk = false, + .minidump_id = 5, .auto_boot = true, .proxy_pd_names = (char*[]){ "lcx", @@ -658,7 +649,6 @@ static const struct adsp_data sm8350_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "lcx", @@ -675,7 +665,6 @@ static const struct adsp_data msm8996_adsp_resource = { .crash_reason_smem = 423, .firmware_name = "adsp.mdt", .pas_id = 1, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "cx", @@ -690,7 +679,6 @@ static const struct adsp_data cdsp_resource_init = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, - .has_aggre2_clk = false, .auto_boot = true, .ssr_name = "cdsp", .sysmon_name = "cdsp", @@ -701,7 +689,6 @@ static const struct adsp_data sdm845_cdsp_resource_init = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, - .has_aggre2_clk = false, .auto_boot = true, .load_state = "cdsp", .ssr_name = "cdsp", @@ -713,7 +700,6 @@ static const struct adsp_data sm6350_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "cx", @@ -730,7 +716,6 @@ static const struct adsp_data sm8150_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "cx", @@ -746,7 +731,6 @@ static const struct adsp_data sm8250_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "cx", @@ -762,7 +746,6 @@ static const struct adsp_data sc8280xp_nsp0_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "nsp", @@ -777,7 +760,6 @@ static const struct adsp_data sc8280xp_nsp1_resource = { .crash_reason_smem = 633, .firmware_name = "cdsp.mdt", .pas_id = 30, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "nsp", @@ -792,7 +774,7 @@ static const struct adsp_data sm8350_cdsp_resource = { .crash_reason_smem = 601, .firmware_name = "cdsp.mdt", .pas_id = 18, - .has_aggre2_clk = false, + .minidump_id = 7, .auto_boot = true, .proxy_pd_names = (char*[]){ "cx", @@ -810,7 +792,6 @@ static const struct adsp_data mpss_resource_init = { .firmware_name = 
"modem.mdt", .pas_id = 4, .minidump_id = 3, - .has_aggre2_clk = false, .auto_boot = false, .proxy_pd_names = (char*[]){ "cx", @@ -827,7 +808,6 @@ static const struct adsp_data sc8180x_mpss_resource = { .crash_reason_smem = 421, .firmware_name = "modem.mdt", .pas_id = 4, - .has_aggre2_clk = false, .auto_boot = false, .proxy_pd_names = (char*[]){ "cx", @@ -843,7 +823,6 @@ static const struct adsp_data slpi_resource_init = { .crash_reason_smem = 424, .firmware_name = "slpi.mdt", .pas_id = 12, - .has_aggre2_clk = true, .auto_boot = true, .proxy_pd_names = (char*[]){ "ssc_cx", @@ -858,7 +837,6 @@ static const struct adsp_data sm8150_slpi_resource = { .crash_reason_smem = 424, .firmware_name = "slpi.mdt", .pas_id = 12, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "lcx", @@ -875,7 +853,6 @@ static const struct adsp_data sm8250_slpi_resource = { .crash_reason_smem = 424, .firmware_name = "slpi.mdt", .pas_id = 12, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "lcx", @@ -892,7 +869,6 @@ static const struct adsp_data sm8350_slpi_resource = { .crash_reason_smem = 424, .firmware_name = "slpi.mdt", .pas_id = 12, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "lcx", @@ -919,7 +895,6 @@ static const struct adsp_data sdx55_mpss_resource = { .crash_reason_smem = 421, .firmware_name = "modem.mdt", .pas_id = 4, - .has_aggre2_clk = false, .auto_boot = true, .proxy_pd_names = (char*[]){ "cx", @@ -936,7 +911,6 @@ static const struct adsp_data sm8450_mpss_resource = { .firmware_name = "modem.mdt", .pas_id = 4, .minidump_id = 3, - .has_aggre2_clk = false, .auto_boot = false, .decrypt_shutdown = true, .proxy_pd_names = (char*[]){ diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index 68f37296b151..af96541c9b69 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -117,10 +117,10 @@ static const struct wcnss_data pronto_v1_data = { .pmu_offset = 0x1004, .spare_offset = 0x1088, - .pd_names = { "mx", "cx" }, + .pd_names = { "cx", "mx" }, .vregs = (struct wcnss_vreg_info[]) { - { "vddmx", 950000, 1150000, 0 }, { "vddcx", .super_turbo = true}, + { "vddmx", 950000, 1150000, 0 }, { "vddpx", 1800000, 1800000, 0 }, }, .num_pd_vregs = 2, @@ -131,10 +131,10 @@ static const struct wcnss_data pronto_v2_data = { .pmu_offset = 0x1004, .spare_offset = 0x1088, - .pd_names = { "mx", "cx" }, + .pd_names = { "cx", "mx" }, .vregs = (struct wcnss_vreg_info[]) { - { "vddmx", 1287500, 1287500, 0 }, { "vddcx", .super_turbo = true }, + { "vddmx", 1287500, 1287500, 0 }, { "vddpx", 1800000, 1800000, 0 }, }, .num_pd_vregs = 2, @@ -386,8 +386,17 @@ static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev) static int wcnss_init_pds(struct qcom_wcnss *wcnss, const char * const pd_names[WCNSS_MAX_PDS]) { + struct device *dev = wcnss->dev; int i, ret; + /* Handle single power domain */ + if (dev->pm_domain) { + wcnss->pds[0] = dev; + wcnss->num_pds = 1; + pm_runtime_enable(dev); + return 0; + } + for (i = 0; i < WCNSS_MAX_PDS; i++) { if (!pd_names[i]) break; @@ -407,8 +416,15 @@ static int wcnss_init_pds(struct qcom_wcnss *wcnss, static void wcnss_release_pds(struct qcom_wcnss *wcnss) { + struct device *dev = wcnss->dev; int i; + /* Handle single power domain */ + if (wcnss->num_pds == 1 && dev->pm_domain) { + pm_runtime_disable(dev); + return; + } + for (i = 0; i < wcnss->num_pds; i++) dev_pm_domain_detach(wcnss->pds[i], false); } @@ -426,10 +442,14 @@ static int wcnss_init_regulators(struct 
qcom_wcnss *wcnss, * the regulators for the power domains. For old device trees we need to * reserve extra space to manage them through the regulator interface. */ - if (wcnss->num_pds) - info += num_pd_vregs; - else + if (wcnss->num_pds) { + info += wcnss->num_pds; + /* Handle single power domain case */ + if (wcnss->num_pds < num_pd_vregs) + num_vregs += num_pd_vregs - wcnss->num_pds; + } else { num_vregs += num_pd_vregs; + } bulk = devm_kcalloc(wcnss->dev, num_vregs, sizeof(struct regulator_bulk_data), diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index c3f194d9384d..beae269e80b3 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -2023,6 +2023,7 @@ int rproc_shutdown(struct rproc *rproc) kfree(rproc->cached_table); rproc->cached_table = NULL; rproc->table_ptr = NULL; + rproc->table_sz = 0; out: mutex_unlock(&rproc->lock); return ret; @@ -2464,6 +2465,13 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, rproc->dev.driver_data = rproc; idr_init(&rproc->notifyids); + /* Assign a unique device index and name */ + rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL); + if (rproc->index < 0) { + dev_err(dev, "ida_alloc failed: %d\n", rproc->index); + goto put_device; + } + rproc->name = kstrdup_const(name, GFP_KERNEL); if (!rproc->name) goto put_device; @@ -2474,13 +2482,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, if (rproc_alloc_ops(rproc, ops)) goto put_device; - /* Assign a unique device index and name */ - rproc->index = ida_alloc(&rproc_dev_index, GFP_KERNEL); - if (rproc->index < 0) { - dev_err(dev, "ida_alloc failed: %d\n", rproc->index); - goto put_device; - } - dev_set_name(&rproc->dev, "remoteproc%d", rproc->index); atomic_set(&rproc->power, 0); diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index cb8f65c1d4e3..c838be098e86 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c @@ -189,20 +189,20 @@ struct glink_channel { static const struct rpmsg_endpoint_ops glink_endpoint_ops; -#define RPM_CMD_VERSION 0 -#define RPM_CMD_VERSION_ACK 1 -#define RPM_CMD_OPEN 2 -#define RPM_CMD_CLOSE 3 -#define RPM_CMD_OPEN_ACK 4 -#define RPM_CMD_INTENT 5 -#define RPM_CMD_RX_DONE 6 -#define RPM_CMD_RX_INTENT_REQ 7 -#define RPM_CMD_RX_INTENT_REQ_ACK 8 -#define RPM_CMD_TX_DATA 9 -#define RPM_CMD_CLOSE_ACK 11 -#define RPM_CMD_TX_DATA_CONT 12 -#define RPM_CMD_READ_NOTIF 13 -#define RPM_CMD_RX_DONE_W_REUSE 14 +#define GLINK_CMD_VERSION 0 +#define GLINK_CMD_VERSION_ACK 1 +#define GLINK_CMD_OPEN 2 +#define GLINK_CMD_CLOSE 3 +#define GLINK_CMD_OPEN_ACK 4 +#define GLINK_CMD_INTENT 5 +#define GLINK_CMD_RX_DONE 6 +#define GLINK_CMD_RX_INTENT_REQ 7 +#define GLINK_CMD_RX_INTENT_REQ_ACK 8 +#define GLINK_CMD_TX_DATA 9 +#define GLINK_CMD_CLOSE_ACK 11 +#define GLINK_CMD_TX_DATA_CONT 12 +#define GLINK_CMD_READ_NOTIF 13 +#define GLINK_CMD_RX_DONE_W_REUSE 14 #define GLINK_FEATURE_INTENTLESS BIT(1) @@ -311,7 +311,7 @@ static void qcom_glink_send_read_notify(struct qcom_glink *glink) { struct glink_msg msg; - msg.cmd = cpu_to_le16(RPM_CMD_READ_NOTIF); + msg.cmd = cpu_to_le16(GLINK_CMD_READ_NOTIF); msg.param1 = 0; msg.param2 = 0; @@ -373,7 +373,7 @@ static int qcom_glink_send_version(struct qcom_glink *glink) { struct glink_msg msg; - msg.cmd = cpu_to_le16(RPM_CMD_VERSION); + msg.cmd = cpu_to_le16(GLINK_CMD_VERSION); msg.param1 = cpu_to_le16(GLINK_VERSION_1); msg.param2 = cpu_to_le32(glink->features); @@ -384,7 +384,7 @@ 
static void qcom_glink_send_version_ack(struct qcom_glink *glink) { struct glink_msg msg; - msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK); + msg.cmd = cpu_to_le16(GLINK_CMD_VERSION_ACK); msg.param1 = cpu_to_le16(GLINK_VERSION_1); msg.param2 = cpu_to_le32(glink->features); @@ -396,7 +396,7 @@ static void qcom_glink_send_open_ack(struct qcom_glink *glink, { struct glink_msg msg; - msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK); + msg.cmd = cpu_to_le16(GLINK_CMD_OPEN_ACK); msg.param1 = cpu_to_le16(channel->rcid); msg.param2 = cpu_to_le32(0); @@ -422,11 +422,11 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink, } /** - * qcom_glink_send_open_req() - send a RPM_CMD_OPEN request to the remote + * qcom_glink_send_open_req() - send a GLINK_CMD_OPEN request to the remote * @glink: Ptr to the glink edge * @channel: Ptr to the channel that the open req is sent * - * Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote. + * Allocates a local channel id and sends a GLINK_CMD_OPEN message to the remote. * Will return with refcount held, regardless of outcome. * * Return: 0 on success, negative errno otherwise. @@ -455,7 +455,7 @@ static int qcom_glink_send_open_req(struct qcom_glink *glink, channel->lcid = ret; - req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN); + req.msg.cmd = cpu_to_le16(GLINK_CMD_OPEN); req.msg.param1 = cpu_to_le16(channel->lcid); req.msg.param2 = cpu_to_le32(name_len); strcpy(req.name, channel->name); @@ -480,7 +480,7 @@ static void qcom_glink_send_close_req(struct qcom_glink *glink, { struct glink_msg req; - req.cmd = cpu_to_le16(RPM_CMD_CLOSE); + req.cmd = cpu_to_le16(GLINK_CMD_CLOSE); req.param1 = cpu_to_le16(channel->lcid); req.param2 = 0; @@ -492,7 +492,7 @@ static void qcom_glink_send_close_ack(struct qcom_glink *glink, { struct glink_msg req; - req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK); + req.cmd = cpu_to_le16(GLINK_CMD_CLOSE_ACK); req.param1 = cpu_to_le16(rcid); req.param2 = 0; @@ -523,7 +523,7 @@ static void qcom_glink_rx_done_work(struct work_struct *work) iid = intent->id; reuse = intent->reuse; - cmd.id = reuse ? RPM_CMD_RX_DONE_W_REUSE : RPM_CMD_RX_DONE; + cmd.id = reuse ? 
GLINK_CMD_RX_DONE_W_REUSE : GLINK_CMD_RX_DONE; cmd.lcid = cid; cmd.liid = iid; @@ -635,7 +635,7 @@ static int qcom_glink_send_intent_req_ack(struct qcom_glink *glink, { struct glink_msg msg; - msg.cmd = cpu_to_le16(RPM_CMD_RX_INTENT_REQ_ACK); + msg.cmd = cpu_to_le16(GLINK_CMD_RX_INTENT_REQ_ACK); msg.param1 = cpu_to_le16(channel->lcid); msg.param2 = cpu_to_le32(granted); @@ -666,7 +666,7 @@ static int qcom_glink_advertise_intent(struct qcom_glink *glink, } __packed; struct command cmd; - cmd.id = cpu_to_le16(RPM_CMD_INTENT); + cmd.id = cpu_to_le16(GLINK_CMD_INTENT); cmd.lcid = cpu_to_le16(channel->lcid); cmd.count = cpu_to_le32(1); cmd.size = cpu_to_le32(intent->size); @@ -1031,42 +1031,43 @@ static irqreturn_t qcom_glink_native_intr(int irq, void *data) param2 = le32_to_cpu(msg.param2); switch (cmd) { - case RPM_CMD_VERSION: - case RPM_CMD_VERSION_ACK: - case RPM_CMD_CLOSE: - case RPM_CMD_CLOSE_ACK: - case RPM_CMD_RX_INTENT_REQ: + case GLINK_CMD_VERSION: + case GLINK_CMD_VERSION_ACK: + case GLINK_CMD_CLOSE: + case GLINK_CMD_CLOSE_ACK: + case GLINK_CMD_RX_INTENT_REQ: ret = qcom_glink_rx_defer(glink, 0); break; - case RPM_CMD_OPEN_ACK: + case GLINK_CMD_OPEN_ACK: ret = qcom_glink_rx_open_ack(glink, param1); qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); break; - case RPM_CMD_OPEN: - ret = qcom_glink_rx_defer(glink, param2); + case GLINK_CMD_OPEN: + /* upper 16 bits of param2 are the "prio" field */ + ret = qcom_glink_rx_defer(glink, param2 & 0xffff); break; - case RPM_CMD_TX_DATA: - case RPM_CMD_TX_DATA_CONT: + case GLINK_CMD_TX_DATA: + case GLINK_CMD_TX_DATA_CONT: ret = qcom_glink_rx_data(glink, avail); break; - case RPM_CMD_READ_NOTIF: + case GLINK_CMD_READ_NOTIF: qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); mbox_send_message(glink->mbox_chan, NULL); mbox_client_txdone(glink->mbox_chan, 0); break; - case RPM_CMD_INTENT: + case GLINK_CMD_INTENT: qcom_glink_handle_intent(glink, param1, param2, avail); break; - case RPM_CMD_RX_DONE: + case GLINK_CMD_RX_DONE: qcom_glink_handle_rx_done(glink, param1, param2, false); qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); break; - case RPM_CMD_RX_DONE_W_REUSE: + case GLINK_CMD_RX_DONE_W_REUSE: qcom_glink_handle_rx_done(glink, param1, param2, true); qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); break; - case RPM_CMD_RX_INTENT_REQ_ACK: + case GLINK_CMD_RX_INTENT_REQ_ACK: qcom_glink_handle_intent_req_ack(glink, param1, param2); qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8)); break; @@ -1269,7 +1270,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, reinit_completion(&channel->intent_req_comp); - cmd.id = RPM_CMD_RX_INTENT_REQ; + cmd.id = GLINK_CMD_RX_INTENT_REQ; cmd.cid = channel->lcid; cmd.size = size; @@ -1343,7 +1344,7 @@ static int __qcom_glink_send(struct glink_channel *channel, chunk_size = SZ_8K; left_size = len - chunk_size; } - req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA); + req.msg.cmd = cpu_to_le16(GLINK_CMD_TX_DATA); req.msg.param1 = cpu_to_le16(channel->lcid); req.msg.param2 = cpu_to_le32(iid); req.chunk_size = cpu_to_le32(chunk_size); @@ -1365,7 +1366,7 @@ static int __qcom_glink_send(struct glink_channel *channel, chunk_size = SZ_8K; left_size -= chunk_size; - req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA_CONT); + req.msg.cmd = cpu_to_le16(GLINK_CMD_TX_DATA_CONT); req.msg.param1 = cpu_to_le16(channel->lcid); req.msg.param2 = cpu_to_le32(iid); req.chunk_size = cpu_to_le32(chunk_size); @@ -1631,22 +1632,22 @@ static void qcom_glink_work(struct work_struct *work) param2 = le32_to_cpu(msg->param2); switch 
(cmd) { - case RPM_CMD_VERSION: + case GLINK_CMD_VERSION: qcom_glink_receive_version(glink, param1, param2); break; - case RPM_CMD_VERSION_ACK: + case GLINK_CMD_VERSION_ACK: qcom_glink_receive_version_ack(glink, param1, param2); break; - case RPM_CMD_OPEN: + case GLINK_CMD_OPEN: qcom_glink_rx_open(glink, param1, msg->data); break; - case RPM_CMD_CLOSE: + case GLINK_CMD_CLOSE: qcom_glink_rx_close(glink, param1); break; - case RPM_CMD_CLOSE_ACK: + case GLINK_CMD_CLOSE_ACK: qcom_glink_rx_close_ack(glink, param1); break; - case RPM_CMD_RX_INTENT_REQ: + case GLINK_CMD_RX_INTENT_REQ: qcom_glink_handle_intent_req(glink, param1, param2); break; default: diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index c928037bf6f3..04ce689dae92 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -904,13 +904,18 @@ void rtc_timer_do_work(struct work_struct *work) struct timerqueue_node *next; ktime_t now; struct rtc_time tm; + int err; struct rtc_device *rtc = container_of(work, struct rtc_device, irqwork); mutex_lock(&rtc->ops_lock); again: - __rtc_read_time(rtc, &tm); + err = __rtc_read_time(rtc, &tm); + if (err) { + mutex_unlock(&rtc->ops_lock); + return; + } now = rtc_tm_to_ktime(tm); while ((next = timerqueue_getnext(&rtc->timerqueue))) { if (next->expires > now) diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c index 2f8deb8c4cd3..da710a29e962 100644 --- a/drivers/rtc/rtc-ab-eoz9.c +++ b/drivers/rtc/rtc-ab-eoz9.c @@ -396,13 +396,6 @@ static int abeoz9z3_temp_read(struct device *dev, if (ret < 0) return ret; - if ((val & ABEOZ9_REG_CTRL_STATUS_V1F) || - (val & ABEOZ9_REG_CTRL_STATUS_V2F)) { - dev_err(dev, - "thermometer might be disabled due to low voltage\n"); - return -EINVAL; - } - switch (attr) { case hwmon_temp_input: ret = regmap_read(regmap, ABEOZ9_REG_REG_TEMP, &val); diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c index 9b0138d07232..2ea6fdd2ae98 100644 --- a/drivers/rtc/rtc-abx80x.c +++ b/drivers/rtc/rtc-abx80x.c @@ -37,7 +37,7 @@ #define ABX8XX_REG_STATUS 0x0f #define ABX8XX_STATUS_AF BIT(2) #define ABX8XX_STATUS_BLF BIT(4) -#define ABX8XX_STATUS_WDT BIT(6) +#define ABX8XX_STATUS_WDT BIT(5) #define ABX8XX_REG_CTRL1 0x10 #define ABX8XX_CTRL_WRITE BIT(0) diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 542568cd72b3..5f43773900d1 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -645,18 +645,17 @@ static int cmos_nvram_read(void *priv, unsigned int off, void *val, unsigned char *buf = val; off += NVRAM_OFFSET; - spin_lock_irq(&rtc_lock); - for (; count; count--, off++) { + for (; count; count--, off++, buf++) { + guard(spinlock_irq)(&rtc_lock); if (off < 128) - *buf++ = CMOS_READ(off); + *buf = CMOS_READ(off); else if (can_bank2) - *buf++ = cmos_read_bank2(off); + *buf = cmos_read_bank2(off); else - break; + return -EIO; } - spin_unlock_irq(&rtc_lock); - return count ? -EIO : 0; + return 0; } static int cmos_nvram_write(void *priv, unsigned int off, void *val, @@ -671,23 +670,23 @@ static int cmos_nvram_write(void *priv, unsigned int off, void *val, * NVRAM to update, updating checksums is also part of its job. 
*/ off += NVRAM_OFFSET; - spin_lock_irq(&rtc_lock); - for (; count; count--, off++) { + for (; count; count--, off++, buf++) { /* don't trash RTC registers */ if (off == cmos->day_alrm || off == cmos->mon_alrm || off == cmos->century) - buf++; - else if (off < 128) - CMOS_WRITE(*buf++, off); - else if (can_bank2) - cmos_write_bank2(*buf++, off); - else - break; - } - spin_unlock_irq(&rtc_lock); + continue; - return count ? -EIO : 0; + guard(spinlock_irq)(&rtc_lock); + if (off < 128) + CMOS_WRITE(*buf, off); + else if (can_bank2) + cmos_write_bank2(*buf, off); + else + return -EIO; + } + + return 0; } /*----------------------------------------------------------------*/ diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index d51565bcc189..b7f8b3f9b059 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -1802,10 +1802,8 @@ static int ds1307_probe(struct i2c_client *client, * For some variants, be sure alarms can trigger when we're * running on Vbackup (BBSQI/BBSQW) */ - if (want_irq || ds1307_can_wakeup_device) { + if (want_irq || ds1307_can_wakeup_device) regs[0] |= DS1337_BIT_INTCN | chip->bbsqi_bit; - regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE); - } regmap_write(ds1307->regmap, DS1337_REG_CONTROL, regs[0]); diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index 754e03984f98..4a29b44e75e6 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -35,6 +35,7 @@ #define PCF85063_REG_CTRL1_CAP_SEL BIT(0) #define PCF85063_REG_CTRL1_STOP BIT(5) #define PCF85063_REG_CTRL1_EXT_TEST BIT(7) +#define PCF85063_REG_CTRL1_SWR 0x58 #define PCF85063_REG_CTRL2 0x01 #define PCF85063_CTRL2_AF BIT(6) @@ -322,7 +323,16 @@ static const struct rtc_class_ops pcf85063_rtc_ops = { static int pcf85063_nvmem_read(void *priv, unsigned int offset, void *val, size_t bytes) { - return regmap_read(priv, PCF85063_REG_RAM, val); + unsigned int tmp; + int ret; + + ret = regmap_read(priv, PCF85063_REG_RAM, &tmp); + if (ret < 0) + return ret; + + *(u8 *)val = tmp; + + return 0; } static int pcf85063_nvmem_write(void *priv, unsigned int offset, @@ -597,7 +607,7 @@ static int pcf85063_probe(struct i2c_client *client) i2c_set_clientdata(client, pcf85063); - err = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL1, &tmp); + err = regmap_read(pcf85063->regmap, PCF85063_REG_SC, &tmp); if (err) { dev_err(&client->dev, "RTC chip is not present\n"); return err; @@ -607,6 +617,22 @@ static int pcf85063_probe(struct i2c_client *client) if (IS_ERR(pcf85063->rtc)) return PTR_ERR(pcf85063->rtc); + /* + * If a Power loss is detected, SW reset the device. + * From PCF85063A datasheet: + * There is a low probability that some devices will have corruption + * of the registers after the automatic power-on reset... + */ + if (tmp & PCF85063_REG_SC_OS) { + dev_warn(&client->dev, + "POR issue detected, sending a SW reset\n"); + err = regmap_write(pcf85063->regmap, PCF85063_REG_CTRL1, + PCF85063_REG_CTRL1_SWR); + if (err < 0) + dev_warn(&client->dev, + "SW reset failed, trying to continue\n"); + } + err = pcf85063_load_capacitance(pcf85063, client->dev.of_node, config->force_cap_7000 ? 
7000 : 0); if (err < 0) diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c index c3bee305eacc..9c85ecd9afb8 100644 --- a/drivers/rtc/rtc-rv3032.c +++ b/drivers/rtc/rtc-rv3032.c @@ -69,7 +69,7 @@ #define RV3032_CLKOUT2_FD_MSK GENMASK(6, 5) #define RV3032_CLKOUT2_OS BIT(7) -#define RV3032_CTRL1_EERD BIT(3) +#define RV3032_CTRL1_EERD BIT(2) #define RV3032_CTRL1_WADA BIT(5) #define RV3032_CTRL2_STOP BIT(0) diff --git a/drivers/rtc/rtc-rzn1.c b/drivers/rtc/rtc-rzn1.c index 0d36bc50197c..ff094b3ce9e3 100644 --- a/drivers/rtc/rtc-rzn1.c +++ b/drivers/rtc/rtc-rzn1.c @@ -111,8 +111,8 @@ static int rzn1_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_hour = bcd2bin(tm->tm_hour); tm->tm_wday = bcd2bin(tm->tm_wday); tm->tm_mday = bcd2bin(tm->tm_mday); - tm->tm_mon = bcd2bin(tm->tm_mon); - tm->tm_year = bcd2bin(tm->tm_year); + tm->tm_mon = bcd2bin(tm->tm_mon) - 1; + tm->tm_year = bcd2bin(tm->tm_year) + 100; return 0; } @@ -128,8 +128,8 @@ static int rzn1_rtc_set_time(struct device *dev, struct rtc_time *tm) tm->tm_hour = bin2bcd(tm->tm_hour); tm->tm_wday = bin2bcd(rzn1_rtc_tm_to_wday(tm)); tm->tm_mday = bin2bcd(tm->tm_mday); - tm->tm_mon = bin2bcd(tm->tm_mon); - tm->tm_year = bin2bcd(tm->tm_year); + tm->tm_mon = bin2bcd(tm->tm_mon + 1); + tm->tm_year = bin2bcd(tm->tm_year - 100); val = readl(rtc->base + RZN1_RTC_CTL2); if (!(val & RZN1_RTC_CTL2_STOPPED)) { diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c index d04d46f9cc65..23fbe25edeff 100644 --- a/drivers/rtc/rtc-st-lpc.c +++ b/drivers/rtc/rtc-st-lpc.c @@ -218,15 +218,14 @@ static int st_rtc_probe(struct platform_device *pdev) return -EINVAL; } - ret = devm_request_irq(&pdev->dev, rtc->irq, st_rtc_handler, 0, - pdev->name, rtc); + ret = devm_request_irq(&pdev->dev, rtc->irq, st_rtc_handler, + IRQF_NO_AUTOEN, pdev->name, rtc); if (ret) { dev_err(&pdev->dev, "Failed to request irq %i\n", rtc->irq); return ret; } enable_irq_wake(rtc->irq); - disable_irq(rtc->irq); rtc->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(rtc->clk)) { diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c index c9b85c838ebe..e8a2cad05e50 100644 --- a/drivers/rtc/rtc-zynqmp.c +++ b/drivers/rtc/rtc-zynqmp.c @@ -318,8 +318,8 @@ static int xlnx_rtc_probe(struct platform_device *pdev) return ret; } - /* Getting the rtc_clk info */ - xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc_clk"); + /* Getting the rtc info */ + xrtcdev->rtc_clk = devm_clk_get_optional(&pdev->dev, "rtc"); if (IS_ERR(xrtcdev->rtc_clk)) { if (PTR_ERR(xrtcdev->rtc_clk) != -EPROBE_DEFER) dev_warn(&pdev->dev, "Device clock not found.\n"); diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index e5d947c763ea..6a030ba38bf3 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -263,6 +263,19 @@ static struct console sclp_console = .index = 0 /* ttyS0 */ }; +/* + * Release allocated pages. + */ +static void __init __sclp_console_free_pages(void) +{ + struct list_head *page, *p; + + list_for_each_safe(page, p, &sclp_con_pages) { + list_del(page); + free_page((unsigned long)page); + } +} + /* * called by console_init() in drivers/char/tty_io.c at boot-time. 
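/*
 * The rzn1_rtc_read_time()/rzn1_rtc_set_time() hunks above adjust for the
 * struct rtc_time conventions: tm_mon runs 0..11 and tm_year counts years
 * since 1900, while the hardware registers hold a 1..12 month and a two-digit
 * year in BCD. A small sketch of that conversion in both directions
 * (illustrative only; the example_* helpers and the 20xx assumption for the
 * two-digit year are hypothetical):
 */
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/types.h>

static void example_hw_to_tm(struct rtc_time *tm, u8 bcd_mon, u8 bcd_year)
{
	tm->tm_mon  = bcd2bin(bcd_mon) - 1;	/* rtc_time months are 0..11 */
	tm->tm_year = bcd2bin(bcd_year) + 100;	/* years since 1900, 20xx assumed */
}

static void example_tm_to_hw(const struct rtc_time *tm, u8 *bcd_mon, u8 *bcd_year)
{
	*bcd_mon  = bin2bcd(tm->tm_mon + 1);
	*bcd_year = bin2bcd(tm->tm_year - 100);
}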
*/ @@ -282,6 +295,10 @@ sclp_console_init(void) /* Allocate pages for output buffering */ for (i = 0; i < sclp_console_pages; i++) { page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!page) { + __sclp_console_free_pages(); + return -ENOMEM; + } list_add_tail(page, &sclp_con_pages); } sclp_conbuf = NULL; diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 971fbb52740b..432dad2a2266 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -490,6 +490,17 @@ static const struct tty_operations sclp_ops = { .flush_buffer = sclp_tty_flush_buffer, }; +/* Release allocated pages. */ +static void __init __sclp_tty_free_pages(void) +{ + struct list_head *page, *p; + + list_for_each_safe(page, p, &sclp_tty_pages) { + list_del(page); + free_page((unsigned long)page); + } +} + static int __init sclp_tty_init(void) { @@ -516,6 +527,7 @@ sclp_tty_init(void) for (i = 0; i < MAX_KMEM_PAGES; i++) { page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); if (page == NULL) { + __sclp_tty_free_pages(); tty_driver_kref_put(driver); return -ENOMEM; } diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 5440f285f349..7e00c061538d 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -661,7 +661,8 @@ static int info_update(void) if (time_after(jiffies, chp_info_expires)) { /* Data is too old, update. */ rc = sclp_chp_read_info(&chp_info); - chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ; + if (!rc) + chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL; } mutex_unlock(&info_lock); diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 923f5ca4f5e6..54bfa9fe3031 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -459,10 +459,14 @@ int cio_update_schib(struct subchannel *sch) { struct schib schib; - if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) + if (stsch(sch->schid, &schib)) return -ENODEV; memcpy(&sch->schib, &schib, sizeof(schib)); + + if (!css_sch_is_valid(&schib)) + return -EACCES; + return 0; } EXPORT_SYMBOL_GPL(cio_update_schib); diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 5666b9cc5d29..bdf5a50bd931 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1378,14 +1378,18 @@ enum io_sch_action { IO_SCH_VERIFY, IO_SCH_DISC, IO_SCH_NOP, + IO_SCH_ORPH_CDEV, }; static enum io_sch_action sch_get_action(struct subchannel *sch) { struct ccw_device *cdev; + int rc; cdev = sch_get_cdev(sch); - if (cio_update_schib(sch)) { + rc = cio_update_schib(sch); + + if (rc == -ENODEV) { /* Not operational. */ if (!cdev) return IO_SCH_UNREG; @@ -1393,6 +1397,16 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) return IO_SCH_UNREG; return IO_SCH_ORPH_UNREG; } + + /* Avoid unregistering subchannels without working device. */ + if (rc == -EACCES) { + if (!cdev) + return IO_SCH_NOP; + if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK) + return IO_SCH_UNREG_CDEV; + return IO_SCH_ORPH_CDEV; + } + /* Operational. */ if (!cdev) return IO_SCH_ATTACH; @@ -1462,6 +1476,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) rc = 0; goto out_unlock; case IO_SCH_ORPH_UNREG: + case IO_SCH_ORPH_CDEV: case IO_SCH_ORPH_ATTACH: ccw_device_set_disconnected(cdev); break; @@ -1493,6 +1508,7 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) /* Handle attached ccw device. */ switch (action) { case IO_SCH_ORPH_UNREG: + case IO_SCH_ORPH_CDEV: case IO_SCH_ORPH_ATTACH: /* Move ccw device to orphanage. 
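/*
 * The __sclp_console_free_pages()/__sclp_tty_free_pages() helpers added above
 * walk the page list with list_for_each_safe() so that entries can be
 * unlinked while iterating, and the init paths now call them to unwind when
 * get_zeroed_page() fails part-way through. A compact sketch of that
 * allocate-or-unwind pattern (illustrative only; the example_* names are
 * hypothetical):
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>

static LIST_HEAD(example_pages);	/* hypothetical page pool */

static void example_free_pages(void)
{
	struct list_head *page, *tmp;

	list_for_each_safe(page, tmp, &example_pages) {
		list_del(page);
		free_page((unsigned long)page);
	}
}

static int example_alloc_pages(int nr)
{
	void *page;
	int i;

	for (i = 0; i < nr; i++) {
		page = (void *)get_zeroed_page(GFP_KERNEL);
		if (!page) {
			example_free_pages();	/* release what was already allocated */
			return -ENOMEM;
		}
		list_add_tail(page, &example_pages);
	}
	return 0;
}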
*/ rc = ccw_device_move_to_orph(cdev); diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index 86a8bd532489..11fe917fbd9d 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -789,48 +789,66 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev) vfio_put_device(&matrix_mdev->vdev); } -#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \ - "already assigned to %s" +#define MDEV_SHARING_ERR "Userspace may not assign queue %02lx.%04lx to mdev: already assigned to %s" -static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev, - unsigned long *apm, - unsigned long *aqm) +#define MDEV_IN_USE_ERR "Can not reserve queue %02lx.%04lx for host driver: in use by mdev" + +static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *assignee, + struct ap_matrix_mdev *assigned_to, + unsigned long *apm, unsigned long *aqm) { unsigned long apid, apqi; - const struct device *dev = mdev_dev(matrix_mdev->mdev); - const char *mdev_name = dev_name(dev); - for_each_set_bit_inv(apid, apm, AP_DEVICES) + for_each_set_bit_inv(apid, apm, AP_DEVICES) { + for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) { + dev_warn(mdev_dev(assignee->mdev), MDEV_SHARING_ERR, + apid, apqi, dev_name(mdev_dev(assigned_to->mdev))); + } + } +} + +static void vfio_ap_mdev_log_in_use_err(struct ap_matrix_mdev *assignee, + unsigned long *apm, unsigned long *aqm) +{ + unsigned long apid, apqi; + + for_each_set_bit_inv(apid, apm, AP_DEVICES) { for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) - dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name); + dev_warn(mdev_dev(assignee->mdev), MDEV_IN_USE_ERR, apid, apqi); + } } /** * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs * + * @assignee: the matrix mdev to which @mdev_apm and @mdev_aqm are being + * assigned; or, NULL if this function was called by the AP bus + * driver in_use callback to verify none of the APQNs being reserved + * for the host device driver are in use by a vfio_ap mediated device * @mdev_apm: mask indicating the APIDs of the APQNs to be verified * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified * - * Verifies that each APQN derived from the Cartesian product of a bitmap of - * AP adapter IDs and AP queue indexes is not configured for any matrix - * mediated device. AP queue sharing is not allowed. + * Verifies that each APQN derived from the Cartesian product of APIDs + * represented by the bits set in @mdev_apm and the APQIs of the bits set in + * @mdev_aqm is not assigned to a mediated device other than the mdev to which + * the APQN is being assigned (@assignee). AP queue sharing is not allowed. * * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE. */ -static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, +static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *assignee, + unsigned long *mdev_apm, unsigned long *mdev_aqm) { - struct ap_matrix_mdev *matrix_mdev; + struct ap_matrix_mdev *assigned_to; DECLARE_BITMAP(apm, AP_DEVICES); DECLARE_BITMAP(aqm, AP_DOMAINS); - list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) { + list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) { /* - * If the input apm and aqm are fields of the matrix_mdev - * object, then move on to the next matrix_mdev. 
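/*
 * vfio_ap_mdev_verify_no_sharing(), whose body continues just below, detects
 * APQN sharing by intersecting the adapter (APM) and domain (AQM) bitmaps of
 * two mediated devices with bitmap_and(); only when both intersections are
 * non-empty does a shared queue in the Cartesian product exist. The check in
 * isolation (illustrative only; the example_* names and bitmap sizes are
 * hypothetical):
 */
#include <linux/bitmap.h>
#include <linux/types.h>

#define EXAMPLE_APIDS 256	/* hypothetical number of adapter IDs */
#define EXAMPLE_APQIS 256	/* hypothetical number of queue indexes */

static bool example_apqns_shared(const unsigned long *apm1, const unsigned long *aqm1,
				 const unsigned long *apm2, const unsigned long *aqm2)
{
	DECLARE_BITMAP(apm, EXAMPLE_APIDS);
	DECLARE_BITMAP(aqm, EXAMPLE_APQIS);

	/* a queue (apid, apqi) is shared only if both coordinates overlap */
	return bitmap_and(apm, apm1, apm2, EXAMPLE_APIDS) &&
	       bitmap_and(aqm, aqm1, aqm2, EXAMPLE_APQIS);
}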
+ * If the mdev to which the mdev_apm and mdev_aqm is being + * assigned is the same as the mdev being verified */ - if (mdev_apm == matrix_mdev->matrix.apm && - mdev_aqm == matrix_mdev->matrix.aqm) + if (assignee == assigned_to) continue; memset(apm, 0, sizeof(apm)); @@ -840,15 +858,16 @@ static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm, * We work on full longs, as we can only exclude the leftover * bits in non-inverse order. The leftover is all zeros. */ - if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm, - AP_DEVICES)) + if (!bitmap_and(apm, mdev_apm, assigned_to->matrix.apm, AP_DEVICES)) continue; - if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm, - AP_DOMAINS)) + if (!bitmap_and(aqm, mdev_aqm, assigned_to->matrix.aqm, AP_DOMAINS)) continue; - vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm); + if (assignee) + vfio_ap_mdev_log_sharing_err(assignee, assigned_to, apm, aqm); + else + vfio_ap_mdev_log_in_use_err(assigned_to, apm, aqm); return -EADDRINUSE; } @@ -877,7 +896,8 @@ static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev) matrix_mdev->matrix.aqm)) return -EADDRNOTAVAIL; - return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm, + return vfio_ap_mdev_verify_no_sharing(matrix_mdev, + matrix_mdev->matrix.apm, matrix_mdev->matrix.aqm); } @@ -1945,7 +1965,7 @@ int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm) mutex_lock(&matrix_dev->guests_lock); mutex_lock(&matrix_dev->mdevs_lock); - ret = vfio_ap_mdev_verify_no_sharing(apm, aqm); + ret = vfio_ap_mdev_verify_no_sharing(NULL, apm, aqm); mutex_unlock(&matrix_dev->mdevs_lock); mutex_unlock(&matrix_dev->guests_lock); diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index e5aa982ffedc..d4455ca3a7dd 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -1699,9 +1699,8 @@ bfad_init(void) error = bfad_im_module_init(); if (error) { - error = -ENOMEM; printk(KERN_WARNING "bfad_im_module_init failure\n"); - goto ext; + return -ENOMEM; } if (strcmp(FCPI_NAME, " fcpim") == 0) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 2116f5ee36e2..02855164bf28 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -865,8 +865,28 @@ static void hisi_sas_phyup_work_common(struct work_struct *work, container_of(work, typeof(*phy), works[event]); struct hisi_hba *hisi_hba = phy->hisi_hba; struct asd_sas_phy *sas_phy = &phy->sas_phy; + struct asd_sas_port *sas_port = sas_phy->port; + struct hisi_sas_port *port = phy->port; + struct device *dev = hisi_hba->dev; + struct domain_device *port_dev; int phy_no = sas_phy->id; + if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) && + sas_port && port && (port->id != phy->port_id)) { + dev_info(dev, "phy%d's hw port id changed from %d to %llu\n", + phy_no, port->id, phy->port_id); + port_dev = sas_port->port_dev; + if (port_dev && !dev_is_expander(port_dev->dev_type)) { + /* + * Set the device state to gone to block + * sending IO to the device. 
+ */ + set_bit(SAS_DEV_GONE, &port_dev->state); + hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); + return; + } + } + phy->wait_phyup_cnt = 0; if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP) hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 50697672146a..ae39f6b5dc9a 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -2502,6 +2502,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_port *port = to_hisi_sas_port(sas_port); struct sas_ata_task *ata_task = &task->ata_task; struct sas_tmf_task *tmf = slot->tmf; + int phy_id; u8 *buf_cmd; int has_data = 0, hdr_tag = 0; u32 dw0, dw1 = 0, dw2 = 0; @@ -2509,10 +2510,14 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, /* create header */ /* dw0 */ dw0 = port->id << CMD_HDR_PORT_OFF; - if (parent_dev && dev_is_expander(parent_dev->dev_type)) + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { dw0 |= 3 << CMD_HDR_CMD_OFF; - else + } else { + phy_id = device->phy->identify.phy_identifier; + dw0 |= (1U << phy_id) << CMD_HDR_PHY_ID_OFF; + dw0 |= CMD_HDR_FORCE_PHY_MSK; dw0 |= 4 << CMD_HDR_CMD_OFF; + } if (tmf && ata_task->force_phy) { dw0 |= CMD_HDR_FORCE_PHY_MSK; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 7ae56a2fe232..20b4d76e0714 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -357,6 +357,10 @@ #define CMD_HDR_RESP_REPORT_MSK (0x1 << CMD_HDR_RESP_REPORT_OFF) #define CMD_HDR_TLR_CTRL_OFF 6 #define CMD_HDR_TLR_CTRL_MSK (0x3 << CMD_HDR_TLR_CTRL_OFF) +#define CMD_HDR_PHY_ID_OFF 8 +#define CMD_HDR_PHY_ID_MSK (0x1ff << CMD_HDR_PHY_ID_OFF) +#define CMD_HDR_FORCE_PHY_OFF 17 +#define CMD_HDR_FORCE_PHY_MSK (0x1U << CMD_HDR_FORCE_PHY_OFF) #define CMD_HDR_PORT_OFF 18 #define CMD_HDR_PORT_MSK (0xf << CMD_HDR_PORT_OFF) #define CMD_HDR_PRIORITY_OFF 27 @@ -1385,15 +1389,21 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr; struct asd_sas_port *sas_port = device->port; struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + int phy_id; u8 *buf_cmd; int has_data = 0, hdr_tag = 0; u32 dw1 = 0, dw2 = 0; hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF); - if (parent_dev && dev_is_expander(parent_dev->dev_type)) + if (parent_dev && dev_is_expander(parent_dev->dev_type)) { hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF); - else + } else { + phy_id = device->phy->identify.phy_identifier; + hdr->dw0 |= cpu_to_le32((1U << phy_id) + << CMD_HDR_PHY_ID_OFF); + hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK; hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF); + } switch (task->data_dir) { case DMA_TO_DEVICE: @@ -2410,6 +2420,7 @@ static irqreturn_t cq_thread_v3_hw(int irq_no, void *p) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + cond_resched(); return IRQ_HANDLED; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index aaa98a006fdc..b04112c77fcd 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -177,7 +177,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) /* Don't schedule a worker thread event if the vport is going down. * The teardown process cleans up the node via lpfc_drop_node. 
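/*
 * The prep_ata_v2_hw()/prep_ata_v3_hw() hunks above steer an ATA command to
 * one specific phy by setting CMD_HDR_FORCE_PHY and a one-hot phy bitmap in
 * the PHY_ID field of command-header dword 0. A standalone sketch of that bit
 * packing (illustrative only; the PHY_ID/FORCE_PHY offsets mirror the v3
 * header above, while the CMD_OFF value and the example_* name are
 * hypothetical):
 */
#include <linux/types.h>

#define EX_CMD_HDR_PHY_ID_OFF		8
#define EX_CMD_HDR_FORCE_PHY_OFF	17
#define EX_CMD_HDR_FORCE_PHY_MSK	(0x1U << EX_CMD_HDR_FORCE_PHY_OFF)
#define EX_CMD_HDR_CMD_OFF		29	/* hypothetical for this sketch */

static u32 example_build_ata_dw0(unsigned int phy_id)
{
	u32 dw0 = 0;

	dw0 |= (1U << phy_id) << EX_CMD_HDR_PHY_ID_OFF;	/* one-hot phy selection */
	dw0 |= EX_CMD_HDR_FORCE_PHY_MSK;		/* honour that selection */
	dw0 |= 4U << EX_CMD_HDR_CMD_OFF;		/* STP/SATA frame type, as above */
	return dw0;
}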
*/ - if (vport->load_flag & FC_UNLOADING) { + if ((vport->load_flag & FC_UNLOADING) || + !(phba->hba_flag & HBA_SETUP)) { ((struct lpfc_rport_data *)rport->dd_data)->pnode = NULL; ndlp->rport = NULL; @@ -5618,6 +5619,7 @@ static struct lpfc_nodelist * __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) { struct lpfc_nodelist *ndlp; + struct lpfc_nodelist *np = NULL; uint32_t data1; list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { @@ -5632,14 +5634,20 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->nlp_rpi, ndlp->active_rrqs_xri_bitmap); - return ndlp; + + /* Check for new or potentially stale node */ + if (ndlp->nlp_state != NLP_STE_UNUSED_NODE) + return ndlp; + np = ndlp; } } - /* FIND node did NOT FOUND */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, - "0932 FIND node did x%x NOT FOUND.\n", did); - return NULL; + if (!np) + /* FIND node did NOT FOUND */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0932 FIND node did x%x NOT FOUND.\n", did); + + return np; } struct lpfc_nodelist * @@ -6941,7 +6949,9 @@ lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) if (rc) return; /* Reset HBA FCF states after successful unregister FCF */ + spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag = 0; + spin_unlock_irq(&phba->hbalock); phba->fcf.current_rec.flag = 0; /* diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 1a0bafde34d8..97f3c5240d57 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -13204,6 +13204,7 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba) eqhdl = lpfc_get_eq_hdl(0); rc = pci_irq_vector(phba->pcidev, 0); if (rc < 0) { + free_irq(phba->pcidev->irq, phba); pci_free_irq_vectors(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0496 MSI pci_irq_vec failed (%d)\n", rc); @@ -13284,6 +13285,7 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) eqhdl = lpfc_get_eq_hdl(0); retval = pci_irq_vector(phba->pcidev, 0); if (retval < 0) { + free_irq(phba->pcidev->irq, phba); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0502 INTR pci_irq_vec failed (%d)\n", retval); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 2a81a42de5c1..ed32aa01c711 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -5554,11 +5554,20 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) iocb = &lpfc_cmd->cur_iocbq; if (phba->sli_rev == LPFC_SLI_REV4) { - pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; - if (!pring_s4) { + /* if the io_wq & pring are gone, the port was reset. 
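/*
 * __lpfc_findnode_did() above now keeps scanning when it meets a node in
 * NLP_STE_UNUSED_NODE, remembering it as a fallback, and only logs
 * "NOT FOUND" when nothing matched at all. The shape of that lookup, reduced
 * to a hypothetical list of numbered entries (illustrative only; the
 * example_* names are not from the driver):
 */
#include <linux/list.h>
#include <linux/types.h>

struct example_node {
	struct list_head list;
	u32 id;
	bool unused;
};

static struct example_node *example_find(struct list_head *head, u32 id)
{
	struct example_node *n, *fallback = NULL;

	list_for_each_entry(n, head, list) {
		if (n->id != id)
			continue;
		if (!n->unused)		/* an active match wins immediately */
			return n;
		fallback = n;		/* remember a potentially stale match */
	}
	return fallback;		/* may be NULL: nothing matched */
}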
*/ + if (!phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq || + !phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, + "2877 SCSI Layer I/O Abort Request " + "IO CMPL Status x%x ID %d LUN %llu " + "HBA_SETUP %d\n", FAILED, + cmnd->device->id, + (u64)cmnd->device->lun, + (HBA_SETUP & phba->hba_flag)); ret = FAILED; goto out_unlock_hba; } + pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; spin_lock(&pring_s4->ring_lock); } /* the command is in process of being cancelled */ diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 587e3c2f7c48..1e04b6fc127a 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -4668,6 +4668,17 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba) /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { for (i = 0; i < phba->cfg_hdw_queue; i++) { + if (!phba->sli4_hba.hdwq || + !phba->sli4_hba.hdwq[i].io_wq) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "7777 hdwq's deleted %lx " + "%lx %x %x\n", + (unsigned long)phba->pport->load_flag, + (unsigned long)phba->hba_flag, + phba->link_state, + phba->sli.sli_flag); + return; + } pring = phba->sli4_hba.hdwq[i].io_wq->pring; spin_lock_irq(&pring->ring_lock); diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 37208bc08c66..247e84e989aa 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2101,6 +2101,9 @@ static int megasas_slave_configure(struct scsi_device *sdev) /* This sdev property may change post OCR */ megasas_set_dynamic_target_properties(sdev, is_target_prop); + if (!MEGASAS_IS_LOGICAL(sdev)) + sdev->no_vpd_size = 1; + mutex_unlock(&instance->reset_mutex); return 0; @@ -3661,8 +3664,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_LD_INIT_IN_PROGRESS: - cmd->scmd->result = - (DID_ERROR << 16) | hdr->scsi_status; + if (hdr->scsi_status == 0xf0) + cmd->scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION; + else + cmd->scmd->result = (DID_ERROR << 16) | hdr->scsi_status; break; case MFI_STAT_SCSI_DONE_WITH_ERROR: @@ -8905,8 +8910,11 @@ megasas_aen_polling(struct work_struct *work) (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL), (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL), 0); - if (sdev1) + if (sdev1) { + mutex_unlock(&instance->reset_mutex); megasas_remove_scsi_device(sdev1); + mutex_lock(&instance->reset_mutex); + } event_type = SCAN_VD_CHANNEL; break; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 1475f3e259c1..4c79d699543e 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -2040,7 +2040,10 @@ map_cmd_status(struct fusion_context *fusion, case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_LD_INIT_IN_PROGRESS: - scmd->result = (DID_ERROR << 16) | ext_status; + if (ext_status == 0xf0) + scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION; + else + scmd->result = (DID_ERROR << 16) | ext_status; break; case MFI_STAT_SCSI_DONE_WITH_ERROR: diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 41636c4c43af..015a875a46a1 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -174,6 +174,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc, char *desc = NULL; u16 event; + if (!(mrioc->logging_level & 
MPI3_DEBUG_EVENT)) + return; + event = event_reply->event; switch (event) { diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 03fcaf735939..b5a3694bfe7f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -5649,8 +5649,7 @@ _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) { pr_err("%s: overriding NVDATA EEDPTagMode setting\n", ioc->name); - ioc->manu_pg11.EEDPTagMode &= ~0x3; - ioc->manu_pg11.EEDPTagMode |= 0x1; + ioc->manu_pg11.EEDPTagMode = 0x1; mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11); } @@ -7061,11 +7060,12 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, int i; u8 failed; __le32 *mfp; + int ret_val; /* make sure doorbell is not in use */ if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__); - return -EFAULT; + goto doorbell_diag_reset; } /* clear pending doorbell interrupts from previous state changes */ @@ -7155,6 +7155,10 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, le32_to_cpu(mfp[i])); } return 0; + +doorbell_diag_reset: + ret_val = _base_diag_reset(ioc); + return ret_val; } /** diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index fc5af6a5114e..863503e8a4d1 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -679,6 +679,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, size_t data_in_sz = 0; long ret; u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; + int tm_ret; issue_reset = 0; @@ -1120,18 +1121,25 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, if (pcie_device && (!ioc->tm_custom_handling) && (!(mpt3sas_scsih_is_pcie_scsi_device( pcie_device->device_info)))) - mpt3sas_scsih_issue_locked_tm(ioc, + tm_ret = mpt3sas_scsih_issue_locked_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, pcie_device->reset_timeout, MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE); else - mpt3sas_scsih_issue_locked_tm(ioc, + tm_ret = mpt3sas_scsih_issue_locked_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET); + + if (tm_ret != SUCCESS) { + ioc_info(ioc, + "target reset failed, issue hard reset: handle (0x%04x)\n", + le16_to_cpu(mpi_request->FunctionDependent1)); + mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + } } else mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); } diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 8e3f2f9ddaac..a87c3d7e3e5c 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -720,6 +720,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev) spin_lock_irqsave(&pm8001_ha->lock, flags); } PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id); + pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0; pm8001_free_dev(pm8001_dev); } else { pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n"); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 179967774cc8..288c96e7bc39 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2739,6 +2739,7 @@ static int qedf_alloc_and_init_sb(struct 
qedf_ctx *qedf, sb_id, QED_SB_TYPE_STORAGE); if (ret) { + dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); QEDF_ERR(&qedf->dbg_ctx, "Status block initialization failed (0x%x) for id = %d.\n", ret, sb_id); diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 2ee109fb6561..825606c845e1 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -369,6 +369,7 @@ static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi, ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys, sb_id, QED_SB_TYPE_STORAGE); if (ret) { + dma_free_coherent(&qedi->pdev->dev, sizeof(*sb_virt), sb_virt, sb_phys); QEDI_ERR(&qedi->dbg_ctx, "Status block initialization failed for id = %d.\n", sb_id); diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 1e7f4d138e06..0bb80d135f56 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -2866,7 +2866,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n", cpu_to_le32(upper_32_bits(dma_handle)), cpu_to_le32(lower_32_bits(dma_handle)), - cpu_to_le32(sg_dma_len(sg_next(s)))); + cpu_to_le32(sg_dma_len(s))); remseg--; } dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather " diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h index d309e2ca14de..dea2290b37d4 100644 --- a/drivers/scsi/qla1280.h +++ b/drivers/scsi/qla1280.h @@ -116,12 +116,12 @@ struct device_reg { uint16_t id_h; /* ID high */ uint16_t cfg_0; /* Configuration 0 */ #define ISP_CFG0_HWMSK 0x000f /* Hardware revision mask */ -#define ISP_CFG0_1020 BIT_0 /* ISP1020 */ -#define ISP_CFG0_1020A BIT_1 /* ISP1020A */ -#define ISP_CFG0_1040 BIT_2 /* ISP1040 */ -#define ISP_CFG0_1040A BIT_3 /* ISP1040A */ -#define ISP_CFG0_1040B BIT_4 /* ISP1040B */ -#define ISP_CFG0_1040C BIT_5 /* ISP1040C */ +#define ISP_CFG0_1020 1 /* ISP1020 */ +#define ISP_CFG0_1020A 2 /* ISP1020A */ +#define ISP_CFG0_1040 3 /* ISP1040 */ +#define ISP_CFG0_1040A 4 /* ISP1040A */ +#define ISP_CFG0_1040B 5 /* ISP1040B */ +#define ISP_CFG0_1040C 6 /* ISP1040C */ uint16_t cfg_1; /* Configuration 1 */ #define ISP_CFG1_F128 BIT_6 /* 128-byte FIFO threshold */ #define ISP_CFG1_F64 BIT_4|BIT_5 /* 128-byte FIFO threshold */ diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 0d414c1aa84e..976dee7a0305 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -3312,6 +3312,7 @@ struct fc_function_template qla2xxx_transport_vport_functions = { .show_host_node_name = 1, .show_host_port_name = 1, .show_host_supported_classes = 1, + .show_host_supported_speeds = 1, .get_host_port_id = qla2x00_get_host_port_id, .show_host_port_id = 1, diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 52dc9604f567..10431a67d202 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -24,6 +24,7 @@ void qla2x00_bsg_job_done(srb_t *sp, int res) { struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct completion *comp = sp->comp; ql_dbg(ql_dbg_user, sp->vha, 0x7009, "%s: sp hdl %x, result=%x bsg ptr %p\n", @@ -35,6 +36,9 @@ void qla2x00_bsg_job_done(srb_t *sp, int res) bsg_reply->result = res; bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + + if (comp) + complete(comp); } void qla2x00_bsg_sp_free(srb_t *sp) @@ -490,16 +494,6 @@ qla2x00_process_ct(struct bsg_job *bsg_job) goto done; } - if ((req_sg_cnt != 
bsg_job->request_payload.sg_cnt) || - (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) { - ql_log(ql_log_warn, vha, 0x7011, - "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x " - "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt, - req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt); - rval = -EAGAIN; - goto done_unmap_sg; - } - if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x7012, "Host is not online.\n"); @@ -3061,7 +3055,7 @@ skip_chip_chk: static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job) { - bool found = false; + bool found, do_bsg_done; struct fc_bsg_reply *bsg_reply = bsg_job->reply; scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct qla_hw_data *ha = vha->hw; @@ -3069,6 +3063,11 @@ static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job) int cnt; unsigned long flags; struct req_que *req; + int rval; + DECLARE_COMPLETION_ONSTACK(comp); + uint32_t ratov_j; + + found = do_bsg_done = false; spin_lock_irqsave(qpair->qp_lock_ptr, flags); req = qpair->req; @@ -3080,42 +3079,104 @@ static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job) sp->type == SRB_ELS_CMD_HST || sp->type == SRB_ELS_CMD_HST_NOLOGIN) && sp->u.bsg_job == bsg_job) { - req->outstanding_cmds[cnt] = NULL; - spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - - if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) { - ql_log(ql_log_warn, vha, 0x7089, - "mbx abort_command failed.\n"); - bsg_reply->result = -EIO; - } else { - ql_dbg(ql_dbg_user, vha, 0x708a, - "mbx abort_command success.\n"); - bsg_reply->result = 0; - } - /* ref: INIT */ - kref_put(&sp->cmd_kref, qla2x00_sp_release); found = true; - goto done; + sp->comp = ∁ + break; } } spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); -done: - return found; + if (!found) + return false; + + if (ha->flags.eeh_busy) { + /* skip over abort. EEH handling will return the bsg. Wait for it */ + rval = QLA_SUCCESS; + ql_dbg(ql_dbg_user, vha, 0x802c, + "eeh encounter. bsg %p sp=%p handle=%x \n", + bsg_job, sp, sp->handle); + } else { + rval = ha->isp_ops->abort_command(sp); + ql_dbg(ql_dbg_user, vha, 0x802c, + "Aborting bsg %p sp=%p handle=%x rval=%x\n", + bsg_job, sp, sp->handle, rval); + } + + switch (rval) { + case QLA_SUCCESS: + /* Wait for the command completion. */ + ratov_j = ha->r_a_tov / 10 * 4 * 1000; + ratov_j = msecs_to_jiffies(ratov_j); + + if (!wait_for_completion_timeout(&comp, ratov_j)) { + ql_log(ql_log_info, vha, 0x7089, + "bsg abort timeout. bsg=%p sp=%p handle %#x .\n", + bsg_job, sp, sp->handle); + + do_bsg_done = true; + } else { + /* fw had returned the bsg */ + ql_dbg(ql_dbg_user, vha, 0x708a, + "bsg abort success. bsg %p sp=%p handle=%#x\n", + bsg_job, sp, sp->handle); + do_bsg_done = false; + } + break; + default: + ql_log(ql_log_info, vha, 0x704f, + "bsg abort fail. bsg=%p sp=%p rval=%x.\n", + bsg_job, sp, rval); + + do_bsg_done = true; + break; + } + + if (!do_bsg_done) + return true; + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + /* + * recheck to make sure it's still the same bsg_job due to + * qp_lock_ptr was released earlier. 
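/*
 * qla_bsg_found() above now attaches an on-stack completion to the SRB, asks
 * the firmware to abort it, and waits up to four times R_A_TOV before
 * declaring the abort timed out. A minimal sketch of the timeout arithmetic
 * and the wait, mirroring the ratov_j computation in the hunk above
 * (illustrative only; the example_* helper is hypothetical):
 */
#include <linux/completion.h>
#include <linux/jiffies.h>

/* r_a_tov is handled as in the hunk above: /10 then *1000 yields milliseconds */
static bool example_wait_for_abort(struct completion *comp, unsigned int r_a_tov)
{
	unsigned long timeout_j = msecs_to_jiffies(r_a_tov / 10 * 4 * 1000);

	/* non-zero return means the command completed before 4 * R_A_TOV */
	return wait_for_completion_timeout(comp, timeout_j) != 0;
}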
+ */ + if (req->outstanding_cmds[cnt] && + req->outstanding_cmds[cnt]->u.bsg_job != bsg_job) { + /* fw had returned the bsg */ + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + return true; + } + req->outstanding_cmds[cnt] = NULL; + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + /* ref: INIT */ + sp->comp = NULL; + kref_put(&sp->cmd_kref, qla2x00_sp_release); + bsg_reply->result = -ENXIO; + bsg_reply->reply_payload_rcv_len = 0; + + ql_dbg(ql_dbg_user, vha, 0x7051, + "%s bsg_job_done : bsg %p result %#x sp %p.\n", + __func__, bsg_job, bsg_reply->result, sp); + + bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + + return true; } int qla24xx_bsg_timeout(struct bsg_job *bsg_job) { - struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct fc_bsg_request *bsg_request = bsg_job->request; scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct qla_hw_data *ha = vha->hw; int i; struct qla_qpair *qpair; - ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. bsg ptr %p.\n", - __func__, bsg_job); + ql_log(ql_log_info, vha, 0x708b, + "%s CMD timeout. bsg ptr %p msgcode %x vendor cmd %x\n", + __func__, bsg_job, bsg_request->msgcode, + bsg_request->rqst_data.h_vendor.vendor_cmd[0]); if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x9007, @@ -3136,7 +3197,6 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) } ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n"); - bsg_reply->result = -ENXIO; done: return 0; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 8490181424c7..f8b949eaffcf 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -4043,6 +4043,8 @@ struct qla_hw_data { uint32_t npiv_supported :1; uint32_t pci_channel_io_perm_failure :1; uint32_t fce_enabled :1; + uint32_t user_enabled_fce :1; + uint32_t fce_dump_buf_alloced :1; uint32_t fac_supported :1; uint32_t chip_reset_done :1; diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index 081af4d420a0..4a82b377928d 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -409,27 +409,32 @@ qla2x00_dfs_fce_show(struct seq_file *s, void *unused) mutex_lock(&ha->fce_mutex); - seq_puts(s, "FCE Trace Buffer\n"); - seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); - seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma); - seq_puts(s, "FCE Enable Registers\n"); - seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", - ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], - ha->fce_mb[5], ha->fce_mb[6]); + if (ha->flags.user_enabled_fce) { + seq_puts(s, "FCE Trace Buffer\n"); + seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr); + seq_printf(s, "Base = %llx\n\n", (unsigned long long)ha->fce_dma); + seq_puts(s, "FCE Enable Registers\n"); + seq_printf(s, "%08x %08x %08x %08x %08x %08x\n", + ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4], + ha->fce_mb[5], ha->fce_mb[6]); - fce = (uint32_t *) ha->fce; - fce_start = (unsigned long long) ha->fce_dma; - for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) { - if (cnt % 8 == 0) - seq_printf(s, "\n%llx: ", - (unsigned long long)((cnt * 4) + fce_start)); - else - seq_putc(s, ' '); - seq_printf(s, "%08x", *fce++); + fce = (uint32_t *)ha->fce; + fce_start = (unsigned long long)ha->fce_dma; + for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) { + if (cnt % 8 == 0) + seq_printf(s, "\n%llx: ", + (unsigned long long)((cnt * 4) + fce_start)); + else + 
seq_putc(s, ' '); + seq_printf(s, "%08x", *fce++); + } + + seq_puts(s, "\nEnd\n"); + } else { + seq_puts(s, "FCE Trace is currently not enabled\n"); + seq_puts(s, "\techo [ 1 | 0 ] > fce\n"); } - seq_puts(s, "\nEnd\n"); - mutex_unlock(&ha->fce_mutex); return 0; @@ -467,7 +472,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file) struct qla_hw_data *ha = vha->hw; int rval; - if (ha->flags.fce_enabled) + if (ha->flags.fce_enabled || !ha->fce) goto out; mutex_lock(&ha->fce_mutex); @@ -488,11 +493,88 @@ out: return single_release(inode, file); } +static ssize_t +qla2x00_dfs_fce_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct seq_file *s = file->private_data; + struct scsi_qla_host *vha = s->private; + struct qla_hw_data *ha = vha->hw; + char *buf; + int rc = 0; + unsigned long enable; + + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) { + ql_dbg(ql_dbg_user, vha, 0xd034, + "this adapter does not support FCE."); + return -EINVAL; + } + + buf = memdup_user_nul(buffer, count); + if (IS_ERR(buf)) { + ql_dbg(ql_dbg_user, vha, 0xd037, + "fail to copy user buffer."); + return PTR_ERR(buf); + } + + enable = kstrtoul(buf, 0, 0); + rc = count; + + mutex_lock(&ha->fce_mutex); + + if (enable) { + if (ha->flags.user_enabled_fce) { + mutex_unlock(&ha->fce_mutex); + goto out_free; + } + ha->flags.user_enabled_fce = 1; + if (!ha->fce) { + rc = qla2x00_alloc_fce_trace(vha); + if (rc) { + ha->flags.user_enabled_fce = 0; + mutex_unlock(&ha->fce_mutex); + goto out_free; + } + + /* adjust fw dump buffer to take into account of this feature */ + if (!ha->flags.fce_dump_buf_alloced) + qla2x00_alloc_fw_dump(vha); + } + + if (!ha->flags.fce_enabled) + qla_enable_fce_trace(vha); + + ql_dbg(ql_dbg_user, vha, 0xd045, "User enabled FCE .\n"); + } else { + if (!ha->flags.user_enabled_fce) { + mutex_unlock(&ha->fce_mutex); + goto out_free; + } + ha->flags.user_enabled_fce = 0; + if (ha->flags.fce_enabled) { + qla2x00_disable_fce_trace(vha, NULL, NULL); + ha->flags.fce_enabled = 0; + } + + qla2x00_free_fce_trace(ha); + /* no need to re-adjust fw dump buffer */ + + ql_dbg(ql_dbg_user, vha, 0xd04f, "User disabled FCE .\n"); + } + + mutex_unlock(&ha->fce_mutex); +out_free: + kfree(buf); + return rc; +} + static const struct file_operations dfs_fce_ops = { .open = qla2x00_dfs_fce_open, .read = seq_read, .llseek = seq_lseek, .release = qla2x00_dfs_fce_release, + .write = qla2x00_dfs_fce_write, }; static int @@ -671,8 +753,6 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto out; - if (!ha->fce) - goto out; if (qla2x00_dfs_root) goto create_dir; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 73cd869caf60..f991fb81c72b 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -11,6 +11,9 @@ /* * Global Function Prototypes in qla_init.c source file. 
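/*
 * qla2x00_dfs_fce_write() above copies the user buffer with
 * memdup_user_nul(), parses an enable flag, and toggles FCE tracing under
 * fce_mutex. A compact, generic sketch of the copy-and-parse step for a
 * hypothetical debugfs attribute, using kstrtobool_from_user(), which folds
 * the copy and the parse into one call (illustrative only; not the driver's
 * code path):
 */
#include <linux/fs.h>
#include <linux/kstrtox.h>

static ssize_t example_dfs_write(struct file *file, const char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	bool enable;
	int rc;

	rc = kstrtobool_from_user(ubuf, count, &enable);
	if (rc)
		return rc;

	/* ... toggle the hypothetical feature under its mutex here ... */

	return count;
}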
*/ +int qla2x00_alloc_fce_trace(scsi_qla_host_t *); +void qla2x00_free_fce_trace(struct qla_hw_data *ha); +void qla_enable_fce_trace(scsi_qla_host_t *); extern int qla2x00_initialize_adapter(scsi_qla_host_t *); extern int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index a65c60160820..682e74196f4b 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2682,7 +2682,7 @@ exit: return rval; } -static void qla_enable_fce_trace(scsi_qla_host_t *vha) +void qla_enable_fce_trace(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; @@ -3718,25 +3718,24 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) return rval; } -static void -qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) +int qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) { dma_addr_t tc_dma; void *tc; struct qla_hw_data *ha = vha->hw; if (!IS_FWI2_CAPABLE(ha)) - return; + return -EINVAL; if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) - return; + return -EINVAL; if (ha->fce) { ql_dbg(ql_dbg_init, vha, 0x00bd, "%s: FCE Mem is already allocated.\n", __func__); - return; + return -EIO; } /* Allocate memory for Fibre Channel Event Buffer. */ @@ -3746,7 +3745,7 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) ql_log(ql_log_warn, vha, 0x00be, "Unable to allocate (%d KB) for FCE.\n", FCE_SIZE / 1024); - return; + return -ENOMEM; } ql_dbg(ql_dbg_init, vha, 0x00c0, @@ -3755,6 +3754,16 @@ qla2x00_alloc_fce_trace(scsi_qla_host_t *vha) ha->fce_dma = tc_dma; ha->fce = tc; ha->fce_bufs = FCE_NUM_BUFFERS; + return 0; +} + +void qla2x00_free_fce_trace(struct qla_hw_data *ha) +{ + if (!ha->fce) + return; + dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, ha->fce_dma); + ha->fce = NULL; + ha->fce_dma = 0; } static void @@ -3845,9 +3854,10 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) if (ha->tgt.atio_ring) mq_size += ha->tgt.atio_q_length * sizeof(request_t); - qla2x00_alloc_fce_trace(vha); - if (ha->fce) + if (ha->fce) { fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; + ha->flags.fce_dump_buf_alloced = 1; + } qla2x00_alloc_eft_trace(vha); if (ha->eft) eft_size = EFT_SIZE; diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index 9e8df452ee14..8e582fbb3403 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -515,6 +515,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) return(NULL); } + vha->irq_offset = QLA_BASE_VECTORS; host = vha->host; fc_vport->dd_data = vha; /* New host info */ diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 41a7ffaabfd1..513a9749ea3d 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -6883,12 +6883,15 @@ qla2x00_do_dpc(void *data) set_user_nice(current, MIN_NICE); set_current_state(TASK_INTERRUPTIBLE); - while (!kthread_should_stop()) { + while (1) { ql_dbg(ql_dbg_dpc, base_vha, 0x4000, "DPC handler sleeping.\n"); schedule(); + if (kthread_should_stop()) + break; + if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags)) qla_pci_set_eeh_busy(base_vha); @@ -6901,15 +6904,16 @@ qla2x00_do_dpc(void *data) goto end_loop; } + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + /* don't do any work. 
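/*
 * The qla2x00_do_dpc() rework here moves the kthread_should_stop() check to
 * just after schedule() returns, and an UNLOADING adapter now simply parks
 * the thread until kthread_stop() is called instead of breaking out early.
 * The bare shape of that daemon loop (illustrative only; the example_* name
 * is hypothetical and the real function does its work between the state
 * changes):
 */
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_thread_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (1) {
		schedule();			/* sleep until woken or stopped */

		if (kthread_should_stop())
			break;

		/* ... handle the queued work here ... */

		set_current_state(TASK_INTERRUPTIBLE);
	}

	return 0;
}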
Wait to be terminated by kthread_stop */ + goto end_loop; + ha->dpc_active = 1; ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001, "DPC handler waking up, dpc_flags=0x%lx.\n", base_vha->dpc_flags); - if (test_bit(UNLOADING, &base_vha->dpc_flags)) - break; - if (IS_P3P_TYPE(ha)) { if (IS_QLA8044(ha)) { if (test_and_clear_bit(ISP_UNRECOVERABLE, @@ -7227,9 +7231,6 @@ end_loop: */ ha->dpc_active = 0; - /* Cleanup any residual CTX SRBs. */ - qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); - return 0; } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index b77035ddc944..2493e07a1a5b 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -5672,7 +5672,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, if (sdebug_host_max_queue) sd_dp->hc_idx = get_tag(cmnd); - if (polled) + if (polled || (ndelay > 0 && ndelay < INCLUSIVE_TIMING_MAX_NS)) ns_from_boot = ktime_get_boottime_ns(); /* one of the resp_*() response functions is called here */ diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 5c5954b78585..df61d7b90665 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -782,12 +782,18 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) case 0x1a: /* start stop unit in progress */ case 0x1b: /* sanitize in progress */ case 0x1d: /* configuration in progress */ - case 0x24: /* depopulation in progress */ action = ACTION_DELAYED_RETRY; break; case 0x0a: /* ALUA state transition */ action = ACTION_DELAYED_REPREP; break; + /* + * Depopulation might take many hours, + * thus it is not worthwhile to retry. + */ + case 0x24: /* depopulation in progress */ + case 0x25: /* depopulation restore in progress */ + fallthrough; default: action = ACTION_FAIL; break; @@ -1152,8 +1158,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request); */ static void scsi_cleanup_rq(struct request *rq) { + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + + cmd->flags = 0; + if (rq->rq_flags & RQF_DONTPREP) { - scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq)); + scsi_mq_uninit_cmd(cmd); rq->rq_flags &= ~RQF_DONTPREP; } } @@ -1573,13 +1583,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req) if (in_flight) __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); - /* - * Only clear the driver-private command data if the LLD does not supply - * a function to initialize that data. - */ - if (!shost->hostt->init_cmd_priv) - memset(cmd + 1, 0, shost->hostt->cmd_size); - cmd->prot_op = SCSI_PROT_NORMAL; if (blk_rq_bytes(req)) cmd->sc_data_direction = rq_dma_dir(req); @@ -1741,6 +1744,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, if (!scsi_host_queue_ready(q, shost, sdev, cmd)) goto out_dec_target_busy; + /* + * Only clear the driver-private command data if the LLD does not supply + * a function to initialize that data. 
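/*
 * The scsi_lib.c hunk continuing just below moves the zeroing of the
 * LLD-private area that follows struct scsi_cmnd from scsi_prepare_cmd()
 * into scsi_queue_rq(), and only performs it when the host template sets a
 * cmd_size but no init_cmd_priv() callback. The area being cleared is simply
 * the cmd_size bytes allocated directly behind the command, as in this
 * sketch (illustrative only; the example_* name is hypothetical):
 */
#include <linux/string.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static void example_clear_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	/* the LLD-private data lives immediately after struct scsi_cmnd */
	if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
		memset(cmd + 1, 0, shost->hostt->cmd_size);
}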
+ */ + if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv) + memset(cmd + 1, 0, shost->hostt->cmd_size); + if (!(req->rq_flags & RQF_DONTPREP)) { ret = scsi_prepare_cmd(req); if (ret != BLK_STS_OK) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 852d509b19b2..69288303e600 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -245,7 +245,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, } ret = sbitmap_init_node(&sdev->budget_map, scsi_device_max_queue_depth(sdev), - new_shift, GFP_KERNEL, + new_shift, GFP_NOIO, sdev->request_queue->node, false, true); if (!ret) sbitmap_resize(&sdev->budget_map, depth); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 49dbcd67579a..7f0fd03dd03b 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -3207,11 +3207,14 @@ iscsi_set_host_param(struct iscsi_transport *transport, } /* see similar check in iscsi_if_set_param() */ - if (strlen(data) > ev->u.set_host_param.len) - return -EINVAL; + if (strlen(data) > ev->u.set_host_param.len) { + err = -EINVAL; + goto out; + } err = transport->set_host_param(shost, ev->u.set_host_param.param, data, ev->u.set_host_param.len); +out: scsi_host_put(shost); return err; } @@ -4102,7 +4105,7 @@ iscsi_if_rx(struct sk_buff *skb) } do { /* - * special case for GET_STATS: + * special case for GET_STATS, GET_CHAP and GET_HOST_STATS: * on success - sending reply and stats from * inside of if_recv_msg(), * on error - fall through. @@ -4111,6 +4114,8 @@ iscsi_if_rx(struct sk_buff *skb) break; if (ev->type == ISCSI_UEVENT_GET_CHAP && !err) break; + if (ev->type == ISCSI_UEVENT_GET_HOST_STATS && !err) + break; err = iscsi_if_send_reply(portid, nlh->nlmsg_type, ev, sizeof(*ev)); if (err == -EAGAIN && --retries < 0) { diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b3fb18262c5c..c0a79066a7f4 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2191,6 +2191,10 @@ sd_spinup_disk(struct scsi_disk *sdkp) break; /* unavailable */ if (sshdr.asc == 4 && sshdr.ascq == 0x1b) break; /* sanitize in progress */ + if (sshdr.asc == 4 && sshdr.ascq == 0x24) + break; /* depopulation in progress */ + if (sshdr.asc == 4 && sshdr.ascq == 0x25) + break; /* depopulation restoration in progress */ /* * Issue command to spin up drive when not ready */ diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 4c78288ffa72..5fc6bf95258c 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -197,6 +197,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, unsigned int nr_zones, size_t *buflen) { struct request_queue *q = sdkp->disk->queue; + unsigned int max_segments; size_t bufsize; void *buf; @@ -208,12 +209,15 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, * Furthermore, since the report zone command cannot be split, make * sure that the allocated buffer can always be mapped by limiting the * number of pages allocated to the HBA max segments limit. + * Since max segments can be larger than the max inline bio vectors, + * further limit the allocated buffer to BIO_MAX_INLINE_VECS. 
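/*
 * sd_zbc_alloc_report_buffer() above clamps the report buffer three ways: by
 * the zone count, by the controller's maximum transfer size, and now by
 * min(BIO_MAX_INLINE_VECS, queue_max_segments()) pages so the buffer can
 * always be mapped with inline bio vectors. The clamping chain in isolation
 * (illustrative only; the example_* name is hypothetical, constants as in
 * the hunk above):
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/minmax.h>

static size_t example_report_bufsize(struct request_queue *q, unsigned int nr_zones)
{
	unsigned int max_segments;
	size_t bufsize;

	bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);	/* 64 bytes per descriptor */
	bufsize = min_t(size_t, bufsize,
			queue_max_hw_sectors(q) << SECTOR_SHIFT);

	max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q));
	bufsize = min_t(size_t, bufsize, max_segments << PAGE_SHIFT);

	return bufsize;
}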
*/ nr_zones = min(nr_zones, sdkp->zone_info.nr_zones); bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE); bufsize = min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT); - bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); + max_segments = min(BIO_MAX_INLINE_VECS, queue_max_segments(q)); + bufsize = min_t(size_t, bufsize, max_segments << PAGE_SHIFT); while (bufsize >= SECTOR_SIZE) { buf = kvzalloc(bufsize, GFP_KERNEL | __GFP_NORETRY); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 12344be14232..1946cc96c172 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -390,7 +390,6 @@ sg_release(struct inode *inode, struct file *filp) mutex_lock(&sdp->open_rel_lock); scsi_autopm_put_device(sdp->device); - kref_put(&sfp->f_ref, sg_remove_sfp); sdp->open_cnt--; /* possibly many open()s waiting on exlude clearing, start many; @@ -402,6 +401,7 @@ sg_release(struct inode *inode, struct file *filp) wake_up_interruptible(&sdp->open_wait); } mutex_unlock(&sdp->open_rel_lock); + kref_put(&sfp->f_ref, sg_remove_sfp); return 0; } diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index b90a440e135d..9ba5ad106b65 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -950,7 +950,6 @@ static void reset_state(struct scsi_tape *STp) STp->partition = find_partition(STp); if (STp->partition < 0) STp->partition = 0; - STp->new_partition = STp->partition; } } @@ -2887,7 +2886,6 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon timeout = STp->long_timeout * 8; DEBC_printk(STp, "Erasing tape.\n"); - fileno = blkno = at_sm = 0; break; case MTSETBLK: /* Set block length */ case MTSETDENSITY: /* Set tape density */ @@ -2920,14 +2918,17 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon if (cmd_in == MTSETDENSITY) { (STp->buffer)->b_data[4] = arg; STp->density_changed = 1; /* At least we tried ;-) */ + STp->changed_density = arg; } else if (cmd_in == SET_DENS_AND_BLK) (STp->buffer)->b_data[4] = arg >> 24; else (STp->buffer)->b_data[4] = STp->density; if (cmd_in == MTSETBLK || cmd_in == SET_DENS_AND_BLK) { ltmp = arg & MT_ST_BLKSIZE_MASK; - if (cmd_in == MTSETBLK) + if (cmd_in == MTSETBLK) { STp->blksize_changed = 1; /* At least we tried ;-) */ + STp->changed_blksize = arg; + } } else ltmp = STp->block_size; (STp->buffer)->b_data[9] = (ltmp >> 16); @@ -3074,7 +3075,9 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon cmd_in == MTSETDRVBUFFER || cmd_in == SET_DENS_AND_BLK) { if (cmdstatp->sense_hdr.sense_key == ILLEGAL_REQUEST && - !(STp->use_pf & PF_TESTED)) { + cmdstatp->sense_hdr.asc == 0x24 && + (STp->device)->scsi_level <= SCSI_2 && + !(STp->use_pf & PF_TESTED)) { /* Try the other possible state of Page Format if not already tried */ STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED; @@ -3504,6 +3507,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) int i, cmd_nr, cmd_type, bt; int retval = 0; unsigned int blk; + bool cmd_mtiocget; struct scsi_tape *STp = file->private_data; struct st_modedef *STm; struct st_partstat *STps; @@ -3617,6 +3621,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) */ if (mtc.mt_op != MTREW && mtc.mt_op != MTOFFL && + mtc.mt_op != MTLOAD && mtc.mt_op != MTRETEN && mtc.mt_op != MTERASE && mtc.mt_op != MTSEEK && @@ -3624,9 +3629,25 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) retval = (-EIO); goto out; } - reset_state(STp); + reset_state(STp); 
/* Clears pos_unknown */ /* remove this when the midlevel properly clears was_reset */ STp->device->was_reset = 0; + + /* Fix the device settings after reset, ignore errors */ + if (mtc.mt_op == MTREW || mtc.mt_op == MTSEEK || + mtc.mt_op == MTEOM) { + if (STp->can_partitions) { + /* STp->new_partition contains the + * latest partition set + */ + STp->partition = 0; + switch_partition(STp); + } + if (STp->density_changed) + st_int_ioctl(STp, MTSETDENSITY, STp->changed_density); + if (STp->blksize_changed) + st_int_ioctl(STp, MTSETBLK, STp->changed_blksize); + } } if (mtc.mt_op != MTNOP && mtc.mt_op != MTSETBLK && @@ -3730,17 +3751,28 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) goto out; } + cmd_mtiocget = cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET); + if ((i = flush_buffer(STp, 0)) < 0) { - retval = i; - goto out; - } - if (STp->can_partitions && - (i = switch_partition(STp)) < 0) { - retval = i; - goto out; + if (cmd_mtiocget && STp->pos_unknown) { + /* flush fails -> modify status accordingly */ + reset_state(STp); + STp->pos_unknown = 1; + } else { /* return error */ + retval = i; + goto out; + } + } else { /* flush_buffer succeeds */ + if (STp->can_partitions) { + i = switch_partition(STp); + if (i < 0) { + retval = i; + goto out; + } + } } - if (cmd_type == _IOC_TYPE(MTIOCGET) && cmd_nr == _IOC_NR(MTIOCGET)) { + if (cmd_mtiocget) { struct mtget mt_status; if (_IOC_SIZE(cmd_in) != sizeof(struct mtget)) { @@ -3754,7 +3786,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) ((STp->density << MT_ST_DENSITY_SHIFT) & MT_ST_DENSITY_MASK); mt_status.mt_blkno = STps->drv_block; mt_status.mt_fileno = STps->drv_file; - if (STp->block_size != 0) { + if (STp->block_size != 0 && mt_status.mt_blkno >= 0) { if (STps->rw == ST_WRITING) mt_status.mt_blkno += (STp->buffer)->buffer_bytes / STp->block_size; @@ -4099,7 +4131,7 @@ static void validate_options(void) */ static int __init st_setup(char *str) { - int i, len, ints[5]; + int i, len, ints[ARRAY_SIZE(parms) + 1]; char *stp; stp = get_options(str, ARRAY_SIZE(ints), ints); diff --git a/drivers/scsi/st.h b/drivers/scsi/st.h index 7a68eaba7e81..2105c6a5b458 100644 --- a/drivers/scsi/st.h +++ b/drivers/scsi/st.h @@ -165,12 +165,14 @@ struct scsi_tape { unsigned char compression_changed; unsigned char drv_buffer; unsigned char density; + unsigned char changed_density; unsigned char door_locked; unsigned char autorew_dev; /* auto-rewind device */ unsigned char rew_at_close; /* rewind necessary at close */ unsigned char inited; unsigned char cleaning_req; /* cleaning requested? */ int block_size; + int changed_blksize; int min_block; int max_block; int recover_count; /* From tape opening */ diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 4fad9d85bd6f..591186daf46f 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -149,6 +149,8 @@ struct hv_fc_wwn_packet { */ static int vmstor_proto_version; +static bool hv_dev_is_fc(struct hv_device *hv_dev); + #define STORVSC_LOGGING_NONE 0 #define STORVSC_LOGGING_ERROR 1 #define STORVSC_LOGGING_WARN 2 @@ -169,6 +171,12 @@ do { \ dev_warn(&(dev)->device, fmt, ##__VA_ARGS__); \ } while (0) +#define storvsc_log_ratelimited(dev, level, fmt, ...) 
\ +do { \ + if (do_logging(level)) \ + dev_warn_ratelimited(&(dev)->device, fmt, ##__VA_ARGS__); \ +} while (0) + struct vmscsi_request { u16 length; u8 srb_status; @@ -1129,6 +1137,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, * not correctly handle: * INQUIRY command with page code parameter set to 0x80 * MODE_SENSE command with cmd[2] == 0x1c + * MAINTENANCE_IN is not supported by HyperV FC passthrough * * Setup srb and scsi status so this won't be fatal. * We do this so we can distinguish truly fatal failues @@ -1136,7 +1145,9 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, */ if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) || - (stor_pkt->vm_srb.cdb[0] == MODE_SENSE)) { + (stor_pkt->vm_srb.cdb[0] == MODE_SENSE) || + (stor_pkt->vm_srb.cdb[0] == MAINTENANCE_IN && + hv_dev_is_fc(device))) { vstor_packet->vm_srb.scsi_status = 0; vstor_packet->vm_srb.srb_status = SRB_STATUS_SUCCESS; } @@ -1163,7 +1174,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device, int loglevel = (stor_pkt->vm_srb.cdb[0] == TEST_UNIT_READY) ? STORVSC_LOGGING_WARN : STORVSC_LOGGING_ERROR; - storvsc_log(device, loglevel, + storvsc_log_ratelimited(device, loglevel, "tag#%d cmd 0x%x status: scsi 0x%x srb 0x%x hv 0x%x\n", scsi_cmd_to_rq(request->cmd)->tag, stor_pkt->vm_srb.cdb[0], @@ -1780,6 +1791,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) length = scsi_bufflen(scmnd); payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb; + payload->range.len = 0; payload_sz = 0; if (scsi_sg_count(scmnd)) { @@ -1798,6 +1810,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd) return SCSI_MLQUEUE_DEVICE_BUSY; } + payload->rangecount = 1; payload->range.len = length; payload->range.offset = offset_in_hvpg; diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index d996782a7106..7a73f5e4a1fc 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c @@ -295,7 +295,7 @@ int clk_enable(struct clk *clk) int ret; if (!clk) - return -EINVAL; + return 0; spin_lock_irqsave(&clock_lock, flags); ret = __clk_enable(clk); diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index ca4f4ca413f1..b19388b349be 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -209,7 +209,6 @@ int __init register_intc_controller(struct intc_desc *desc) goto err0; INIT_LIST_HEAD(&d->list); - list_add_tail(&d->list, &intc_list); raw_spin_lock_init(&d->lock); INIT_RADIX_TREE(&d->tree, GFP_ATOMIC); @@ -369,6 +368,7 @@ int __init register_intc_controller(struct intc_desc *desc) d->skip_suspend = desc->skip_syscore_suspend; + list_add_tail(&d->list, &intc_list); nr_intc_controllers++; return 0; diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c index 4ce0cb61e481..245e9c7f92cd 100644 --- a/drivers/slimbus/messaging.c +++ b/drivers/slimbus/messaging.c @@ -147,8 +147,9 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn) } ret = ctrl->xfer_msg(ctrl, txn); - - if (!ret && need_tid && !txn->msg->comp) { + if (ret == -ETIMEDOUT) { + slim_free_txn_tid(ctrl, txn); + } else if (!ret && need_tid && !txn->msg->comp) { unsigned long ms = txn->rl + HZ; timeout = wait_for_completion_timeout(txn->comp, diff --git a/drivers/soc/apple/rtkit-internal.h b/drivers/soc/apple/rtkit-internal.h index 24bd619ec5e4..1da1dfd9cb19 100644 --- a/drivers/soc/apple/rtkit-internal.h +++ b/drivers/soc/apple/rtkit-internal.h @@ -48,6 +48,7 @@ struct apple_rtkit { struct 
apple_rtkit_shmem ioreport_buffer; struct apple_rtkit_shmem crashlog_buffer; + struct apple_rtkit_shmem oslog_buffer; struct apple_rtkit_shmem syslog_buffer; char *syslog_msg_buffer; diff --git a/drivers/soc/apple/rtkit.c b/drivers/soc/apple/rtkit.c index 8ec74d7539eb..968f9f633393 100644 --- a/drivers/soc/apple/rtkit.c +++ b/drivers/soc/apple/rtkit.c @@ -65,8 +65,9 @@ enum { #define APPLE_RTKIT_SYSLOG_MSG_SIZE GENMASK_ULL(31, 24) #define APPLE_RTKIT_OSLOG_TYPE GENMASK_ULL(63, 56) -#define APPLE_RTKIT_OSLOG_INIT 1 -#define APPLE_RTKIT_OSLOG_ACK 3 +#define APPLE_RTKIT_OSLOG_BUFFER_REQUEST 1 +#define APPLE_RTKIT_OSLOG_SIZE GENMASK_ULL(55, 36) +#define APPLE_RTKIT_OSLOG_IOVA GENMASK_ULL(35, 0) #define APPLE_RTKIT_MIN_SUPPORTED_VERSION 11 #define APPLE_RTKIT_MAX_SUPPORTED_VERSION 12 @@ -255,15 +256,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk, struct apple_rtkit_shmem *buffer, u8 ep, u64 msg) { - size_t n_4kpages = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg); u64 reply; int err; + /* The different size vs. IOVA shifts look odd but are indeed correct this way */ + if (ep == APPLE_RTKIT_EP_OSLOG) { + buffer->size = FIELD_GET(APPLE_RTKIT_OSLOG_SIZE, msg); + buffer->iova = FIELD_GET(APPLE_RTKIT_OSLOG_IOVA, msg) << 12; + } else { + buffer->size = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_SIZE, msg) << 12; + buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg); + } + buffer->buffer = NULL; buffer->iomem = NULL; buffer->is_mapped = false; - buffer->iova = FIELD_GET(APPLE_RTKIT_BUFFER_REQUEST_IOVA, msg); - buffer->size = n_4kpages << 12; dev_dbg(rtk->dev, "RTKit: buffer request for 0x%zx bytes at %pad\n", buffer->size, &buffer->iova); @@ -288,11 +295,21 @@ static int apple_rtkit_common_rx_get_buffer(struct apple_rtkit *rtk, } if (!buffer->is_mapped) { - reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE, - APPLE_RTKIT_BUFFER_REQUEST); - reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, n_4kpages); - reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA, - buffer->iova); + /* oslog uses different fields and needs a shifted IOVA instead of size */ + if (ep == APPLE_RTKIT_EP_OSLOG) { + reply = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, + APPLE_RTKIT_OSLOG_BUFFER_REQUEST); + reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_SIZE, buffer->size); + reply |= FIELD_PREP(APPLE_RTKIT_OSLOG_IOVA, + buffer->iova >> 12); + } else { + reply = FIELD_PREP(APPLE_RTKIT_SYSLOG_TYPE, + APPLE_RTKIT_BUFFER_REQUEST); + reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_SIZE, + buffer->size >> 12); + reply |= FIELD_PREP(APPLE_RTKIT_BUFFER_REQUEST_IOVA, + buffer->iova); + } apple_rtkit_send_message(rtk, ep, reply, NULL, false); } @@ -474,25 +491,18 @@ static void apple_rtkit_syslog_rx(struct apple_rtkit *rtk, u64 msg) } } -static void apple_rtkit_oslog_rx_init(struct apple_rtkit *rtk, u64 msg) -{ - u64 ack; - - dev_dbg(rtk->dev, "RTKit: oslog init: msg: 0x%llx\n", msg); - ack = FIELD_PREP(APPLE_RTKIT_OSLOG_TYPE, APPLE_RTKIT_OSLOG_ACK); - apple_rtkit_send_message(rtk, APPLE_RTKIT_EP_OSLOG, ack, NULL, false); -} - static void apple_rtkit_oslog_rx(struct apple_rtkit *rtk, u64 msg) { u8 type = FIELD_GET(APPLE_RTKIT_OSLOG_TYPE, msg); switch (type) { - case APPLE_RTKIT_OSLOG_INIT: - apple_rtkit_oslog_rx_init(rtk, msg); + case APPLE_RTKIT_OSLOG_BUFFER_REQUEST: + apple_rtkit_common_rx_get_buffer(rtk, &rtk->oslog_buffer, + APPLE_RTKIT_EP_OSLOG, msg); break; default: - dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", msg); + dev_warn(rtk->dev, "RTKit: Unknown oslog message: %llx\n", + msg); } } @@ -731,7 +741,7 @@ 
static struct apple_rtkit *apple_rtkit_init(struct device *dev, void *cookie, rtk->mbox_cl.rx_callback = &apple_rtkit_rx; rtk->mbox_cl.tx_done = &apple_rtkit_tx_done; - rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_MEM_RECLAIM, + rtk->wq = alloc_ordered_workqueue("rtkit-%s", WQ_HIGHPRI | WQ_MEM_RECLAIM, dev_name(rtk->dev)); if (!rtk->wq) { ret = -ENOMEM; @@ -773,6 +783,7 @@ int apple_rtkit_reinit(struct apple_rtkit *rtk) apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer); + apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer); apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer); kfree(rtk->syslog_msg_buffer); @@ -935,6 +946,7 @@ static void apple_rtkit_free(void *data) apple_rtkit_free_buffer(rtk, &rtk->ioreport_buffer); apple_rtkit_free_buffer(rtk, &rtk->crashlog_buffer); + apple_rtkit_free_buffer(rtk, &rtk->oslog_buffer); apple_rtkit_free_buffer(rtk, &rtk->syslog_buffer); kfree(rtk->syslog_msg_buffer); diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c index dae8a2e0f745..78cb2c4bd392 100644 --- a/drivers/soc/atmel/soc.c +++ b/drivers/soc/atmel/soc.c @@ -367,7 +367,7 @@ static const struct of_device_id at91_soc_allowed_list[] __initconst = { static int __init atmel_soc_device_init(void) { - struct device_node *np = of_find_node_by_path("/"); + struct device_node *np __free(device_node) = of_find_node_by_path("/"); if (!of_match_node(at91_soc_allowed_list, np)) return 0; diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c index 3d0cae30c769..06bd94b29fb3 100644 --- a/drivers/soc/fsl/rcpm.c +++ b/drivers/soc/fsl/rcpm.c @@ -36,6 +36,7 @@ static void copy_ippdexpcr1_setting(u32 val) return; regs = of_iomap(np, 0); + of_node_put(np); if (!regs) return; diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c index 88aee59730e3..6d5b6ed36169 100644 --- a/drivers/soc/imx/gpcv2.c +++ b/drivers/soc/imx/gpcv2.c @@ -1347,7 +1347,7 @@ static int imx_pgc_domain_probe(struct platform_device *pdev) } if (IS_ENABLED(CONFIG_LOCKDEP) && - of_property_read_bool(domain->dev->of_node, "power-domains")) + of_property_present(domain->dev->of_node, "power-domains")) lockdep_set_subclass(&domain->genpd.mlock, 1); ret = of_genpd_add_provider_simple(domain->dev->of_node, diff --git a/drivers/soc/imx/imx8mp-blk-ctrl.c b/drivers/soc/imx/imx8mp-blk-ctrl.c index 0f13853901df..9bc536ee1395 100644 --- a/drivers/soc/imx/imx8mp-blk-ctrl.c +++ b/drivers/soc/imx/imx8mp-blk-ctrl.c @@ -659,7 +659,7 @@ static int imx8mp_blk_ctrl_remove(struct platform_device *pdev) of_genpd_del_provider(pdev->dev.of_node); - for (i = 0; bc->onecell_data.num_domains; i++) { + for (i = 0; i < bc->onecell_data.num_domains; i++) { struct imx8mp_blk_ctrl_domain *domain = &bc->domains[i]; pm_genpd_remove(&domain->genpd); diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c index 08197b03955d..006cad061a32 100644 --- a/drivers/soc/imx/soc-imx8m.c +++ b/drivers/soc/imx/soc-imx8m.c @@ -30,11 +30,9 @@ struct imx8_soc_data { char *name; - u32 (*soc_revision)(void); + int (*soc_revision)(u32 *socrev, u64 *socuid); }; -static u64 soc_uid; - #ifdef CONFIG_HAVE_ARM_SMCCC static u32 imx8mq_soc_revision_from_atf(void) { @@ -51,24 +49,27 @@ static u32 imx8mq_soc_revision_from_atf(void) static inline u32 imx8mq_soc_revision_from_atf(void) { return 0; }; #endif -static u32 __init imx8mq_soc_revision(void) +static int imx8mq_soc_revision(u32 *socrev, u64 *socuid) { - struct device_node *np; + struct device_node *np __free(device_node) = + of_find_compatible_node(NULL, 
NULL, "fsl,imx8mq-ocotp"); void __iomem *ocotp_base; u32 magic; u32 rev; struct clk *clk; + int ret; - np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp"); if (!np) - return 0; + return -EINVAL; ocotp_base = of_iomap(np, 0); - WARN_ON(!ocotp_base); + if (!ocotp_base) + return -EINVAL; + clk = of_clk_get_by_name(np, NULL); if (IS_ERR(clk)) { - WARN_ON(IS_ERR(clk)); - return 0; + ret = PTR_ERR(clk); + goto err_clk; } clk_prepare_enable(clk); @@ -84,71 +85,78 @@ static u32 __init imx8mq_soc_revision(void) rev = REV_B1; } - soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); - soc_uid <<= 32; - soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); + *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH); + *socuid <<= 32; + *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); + + *socrev = rev; clk_disable_unprepare(clk); clk_put(clk); iounmap(ocotp_base); - of_node_put(np); - return rev; + return 0; + +err_clk: + iounmap(ocotp_base); + return ret; } -static void __init imx8mm_soc_uid(void) +static int imx8mm_soc_uid(u64 *socuid) { + struct device_node *np __free(device_node) = + of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp"); void __iomem *ocotp_base; - struct device_node *np; struct clk *clk; + int ret = 0; u32 offset = of_machine_is_compatible("fsl,imx8mp") ? IMX8MP_OCOTP_UID_OFFSET : 0; - np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp"); if (!np) - return; + return -EINVAL; ocotp_base = of_iomap(np, 0); - WARN_ON(!ocotp_base); + if (!ocotp_base) + return -EINVAL; + clk = of_clk_get_by_name(np, NULL); if (IS_ERR(clk)) { - WARN_ON(IS_ERR(clk)); - return; + ret = PTR_ERR(clk); + goto err_clk; } clk_prepare_enable(clk); - soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset); - soc_uid <<= 32; - soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset); + *socuid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset); + *socuid <<= 32; + *socuid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset); clk_disable_unprepare(clk); clk_put(clk); + +err_clk: iounmap(ocotp_base); - of_node_put(np); + return ret; } -static u32 __init imx8mm_soc_revision(void) +static int imx8mm_soc_revision(u32 *socrev, u64 *socuid) { - struct device_node *np; + struct device_node *np __free(device_node) = + of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop"); void __iomem *anatop_base; - u32 rev; - np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop"); if (!np) - return 0; + return -EINVAL; anatop_base = of_iomap(np, 0); - WARN_ON(!anatop_base); + if (!anatop_base) + return -EINVAL; - rev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM); + *socrev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM); iounmap(anatop_base); - of_node_put(np); - imx8mm_soc_uid(); - - return rev; + return imx8mm_soc_uid(socuid); } static const struct imx8_soc_data imx8mq_soc_data = { @@ -179,21 +187,34 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = { { } }; -#define imx8_revision(soc_rev) \ - soc_rev ? \ - kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \ +#define imx8_revision(dev, soc_rev) \ + (soc_rev) ? 
\ + devm_kasprintf((dev), GFP_KERNEL, "%d.%d", ((soc_rev) >> 4) & 0xf, (soc_rev) & 0xf) : \ "unknown" -static int __init imx8_soc_init(void) +static void imx8m_unregister_soc(void *data) +{ + soc_device_unregister(data); +} + +static void imx8m_unregister_cpufreq(void *data) +{ + platform_device_unregister(data); +} + +static int imx8m_soc_probe(struct platform_device *pdev) { struct soc_device_attribute *soc_dev_attr; - struct soc_device *soc_dev; - const struct of_device_id *id; - u32 soc_rev = 0; + struct platform_device *cpufreq_dev; const struct imx8_soc_data *data; + struct device *dev = &pdev->dev; + const struct of_device_id *id; + struct soc_device *soc_dev; + u32 soc_rev = 0; + u64 soc_uid = 0; int ret; - soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); + soc_dev_attr = devm_kzalloc(dev, sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) return -ENOMEM; @@ -201,54 +222,83 @@ static int __init imx8_soc_init(void) ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine); if (ret) - goto free_soc; + return ret; id = of_match_node(imx8_soc_match, of_root); - if (!id) { - ret = -ENODEV; - goto free_soc; - } + if (!id) + return -ENODEV; data = id->data; if (data) { soc_dev_attr->soc_id = data->name; - if (data->soc_revision) - soc_rev = data->soc_revision(); + if (data->soc_revision) { + ret = data->soc_revision(&soc_rev, &soc_uid); + if (ret) + return ret; + } } - soc_dev_attr->revision = imx8_revision(soc_rev); - if (!soc_dev_attr->revision) { - ret = -ENOMEM; - goto free_soc; - } + soc_dev_attr->revision = imx8_revision(dev, soc_rev); + if (!soc_dev_attr->revision) + return -ENOMEM; - soc_dev_attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", soc_uid); - if (!soc_dev_attr->serial_number) { - ret = -ENOMEM; - goto free_rev; - } + soc_dev_attr->serial_number = devm_kasprintf(dev, GFP_KERNEL, "%016llX", soc_uid); + if (!soc_dev_attr->serial_number) + return -ENOMEM; soc_dev = soc_device_register(soc_dev_attr); - if (IS_ERR(soc_dev)) { - ret = PTR_ERR(soc_dev); - goto free_serial_number; - } + if (IS_ERR(soc_dev)) + return PTR_ERR(soc_dev); + + ret = devm_add_action(dev, imx8m_unregister_soc, soc_dev); + if (ret) + return ret; pr_info("SoC: %s revision %s\n", soc_dev_attr->soc_id, soc_dev_attr->revision); - if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) - platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); + if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) { + cpufreq_dev = platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); + if (IS_ERR(cpufreq_dev)) + return dev_err_probe(dev, PTR_ERR(cpufreq_dev), + "Failed to register imx-cpufreq-dev device\n"); + ret = devm_add_action(dev, imx8m_unregister_cpufreq, cpufreq_dev); + if (ret) + return ret; + } return 0; +} -free_serial_number: - kfree(soc_dev_attr->serial_number); -free_rev: - if (strcmp(soc_dev_attr->revision, "unknown")) - kfree(soc_dev_attr->revision); -free_soc: - kfree(soc_dev_attr); - return ret; +static struct platform_driver imx8m_soc_driver = { + .probe = imx8m_soc_probe, + .driver = { + .name = "imx8m-soc", + }, +}; + +static int __init imx8_soc_init(void) +{ + struct platform_device *pdev; + int ret; + + /* No match means this is non-i.MX8M hardware, do nothing. 
*/ + if (!of_match_node(imx8_soc_match, of_root)) + return 0; + + ret = platform_driver_register(&imx8m_soc_driver); + if (ret) { + pr_err("Failed to register imx8m-soc platform driver: %d\n", ret); + return ret; + } + + pdev = platform_device_register_simple("imx8m-soc", -1, NULL, 0); + if (IS_ERR(pdev)) { + pr_err("Failed to register imx8m-soc platform device: %ld\n", PTR_ERR(pdev)); + platform_driver_unregister(&imx8m_soc_driver); + return PTR_ERR(pdev); + } + + return 0; } device_initcall(imx8_soc_init); diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c index fc13334db1b1..7269ab8d29b6 100644 --- a/drivers/soc/mediatek/mtk-devapc.c +++ b/drivers/soc/mediatek/mtk-devapc.c @@ -273,44 +273,44 @@ static int mtk_devapc_probe(struct platform_device *pdev) return -EINVAL; devapc_irq = irq_of_parse_and_map(node, 0); - if (!devapc_irq) - return -EINVAL; + if (!devapc_irq) { + ret = -EINVAL; + goto err; + } - ctx->infra_clk = devm_clk_get(&pdev->dev, "devapc-infra-clock"); - if (IS_ERR(ctx->infra_clk)) - return -EINVAL; - - if (clk_prepare_enable(ctx->infra_clk)) - return -EINVAL; + ctx->infra_clk = devm_clk_get_enabled(&pdev->dev, "devapc-infra-clock"); + if (IS_ERR(ctx->infra_clk)) { + ret = -EINVAL; + goto err; + } ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq, IRQF_TRIGGER_NONE, "devapc", ctx); - if (ret) { - clk_disable_unprepare(ctx->infra_clk); - return ret; - } + if (ret) + goto err; platform_set_drvdata(pdev, ctx); start_devapc(ctx); return 0; + +err: + iounmap(ctx->infra_base); + return ret; } -static int mtk_devapc_remove(struct platform_device *pdev) +static void mtk_devapc_remove(struct platform_device *pdev) { struct mtk_devapc_context *ctx = platform_get_drvdata(pdev); stop_devapc(ctx); - - clk_disable_unprepare(ctx->infra_clk); - - return 0; + iounmap(ctx->infra_base); } static struct platform_driver mtk_devapc_driver = { .probe = mtk_devapc_probe, - .remove = mtk_devapc_remove, + .remove_new = mtk_devapc_remove, .driver = { .name = "mtk-devapc", .of_match_table = mtk_devapc_dt_match, diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c index c7cd4daa10b0..f83491a7510e 100644 --- a/drivers/soc/qcom/pdr_interface.c +++ b/drivers/soc/qcom/pdr_interface.c @@ -74,7 +74,6 @@ static int pdr_locator_new_server(struct qmi_handle *qmi, { struct pdr_handle *pdr = container_of(qmi, struct pdr_handle, locator_hdl); - struct pdr_service *pds; mutex_lock(&pdr->lock); /* Create a local client port for QMI communication */ @@ -86,12 +85,7 @@ static int pdr_locator_new_server(struct qmi_handle *qmi, mutex_unlock(&pdr->lock); /* Service pending lookup requests */ - mutex_lock(&pdr->list_lock); - list_for_each_entry(pds, &pdr->lookups, node) { - if (pds->need_locator_lookup) - schedule_work(&pdr->locator_work); - } - mutex_unlock(&pdr->list_lock); + schedule_work(&pdr->locator_work); return 0; } diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c index a0ceeede450f..d18309d3d040 100644 --- a/drivers/soc/qcom/qcom-geni-se.c +++ b/drivers/soc/qcom/qcom-geni-se.c @@ -597,7 +597,8 @@ int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl) for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) { freq = clk_round_rate(se->clk, freq + 1); - if (freq <= 0 || freq == se->clk_perf_tbl[i - 1]) + if (freq <= 0 || + (i > 0 && freq == se->clk_perf_tbl[i - 1])) break; se->clk_perf_tbl[i] = freq; } diff --git a/drivers/soc/qcom/smem_state.c b/drivers/soc/qcom/smem_state.c index e848cc9a3cf8..a8be3a2f3382 100644 
--- a/drivers/soc/qcom/smem_state.c +++ b/drivers/soc/qcom/smem_state.c @@ -116,7 +116,8 @@ struct qcom_smem_state *qcom_smem_state_get(struct device *dev, if (args.args_count != 1) { dev_err(dev, "invalid #qcom,smem-state-cells\n"); - return ERR_PTR(-EINVAL); + state = ERR_PTR(-EINVAL); + goto put; } state = of_node_to_state(args.np); diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c index aa37e1bad095..e5f142026188 100644 --- a/drivers/soc/qcom/socinfo.c +++ b/drivers/soc/qcom/socinfo.c @@ -649,10 +649,16 @@ static int qcom_socinfo_probe(struct platform_device *pdev) qs->attr.revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u.%u", SOCINFO_MAJOR(le32_to_cpu(info->ver)), SOCINFO_MINOR(le32_to_cpu(info->ver))); - if (offsetof(struct socinfo, serial_num) <= item_size) + if (!qs->attr.soc_id || !qs->attr.revision) + return -ENOMEM; + + if (offsetofend(struct socinfo, serial_num) <= item_size) { qs->attr.serial_number = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%u", le32_to_cpu(info->serial_num)); + if (!qs->attr.serial_number) + return -ENOMEM; + } qs->soc_dev = soc_device_register(&qs->attr); if (IS_ERR(qs->soc_dev)) diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c index 0fb3631e7346..b80bd80aa012 100644 --- a/drivers/soc/samsung/exynos-chipid.c +++ b/drivers/soc/samsung/exynos-chipid.c @@ -131,6 +131,8 @@ static int exynos_chipid_probe(struct platform_device *pdev) soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%x", soc_info.revision); + if (!soc_dev_attr->revision) + return -ENOMEM; soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id); if (!soc_dev_attr->soc_id) { pr_err("Unknown SoC\n"); diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c index 91f441ee6175..5b0d8260918d 100644 --- a/drivers/soc/ti/k3-socinfo.c +++ b/drivers/soc/ti/k3-socinfo.c @@ -60,6 +60,12 @@ k3_chipinfo_partno_to_names(unsigned int partno, return -EINVAL; } +static const struct regmap_config k3_chipinfo_regmap_cfg = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + static int k3_chipinfo_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; @@ -67,13 +73,18 @@ static int k3_chipinfo_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct soc_device *soc_dev; struct regmap *regmap; + void __iomem *base; u32 partno_id; u32 variant; u32 jtag_id; u32 mfg; int ret; - regmap = device_node_to_regmap(node); + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + regmap = regmap_init_mmio(dev, base, &k3_chipinfo_regmap_cfg); if (IS_ERR(regmap)) return PTR_ERR(regmap); diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c index 6a389a6444f3..e2e06b848822 100644 --- a/drivers/soc/ti/smartreflex.c +++ b/drivers/soc/ti/smartreflex.c @@ -203,10 +203,10 @@ static int sr_late_init(struct omap_sr *sr_info) if (sr_class->notify && sr_class->notify_flags && sr_info->irq) { ret = devm_request_irq(&sr_info->pdev->dev, sr_info->irq, - sr_interrupt, 0, sr_info->name, sr_info); + sr_interrupt, IRQF_NO_AUTOEN, + sr_info->name, sr_info); if (ret) goto error; - disable_irq(sr_info->irq); } if (pdata && pdata->enable_on_init) diff --git a/drivers/soc/ti/ti_sci_pm_domains.c b/drivers/soc/ti/ti_sci_pm_domains.c index 17984a7bffba..b21b152ed5d0 100644 --- a/drivers/soc/ti/ti_sci_pm_domains.c +++ b/drivers/soc/ti/ti_sci_pm_domains.c @@ -165,6 +165,7 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev) break; 
if (args.args_count >= 1 && args.np == dev->of_node) { + of_node_put(args.np); if (args.args[0] > max_id) { max_id = args.args[0]; } else { @@ -192,7 +193,10 @@ static int ti_sci_pm_domain_probe(struct platform_device *pdev) pm_genpd_init(&pd->pd, NULL, true); list_add(&pd->node, &pd_provider->pd_list); + } else { + of_node_put(args.np); } + index++; } } diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c index 82e317474023..e5476010bb3d 100644 --- a/drivers/soc/xilinx/xlnx_event_manager.c +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -174,8 +174,10 @@ static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data) INIT_LIST_HEAD(&eve_data->cb_list_head); cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL); - if (!cb_data) + if (!cb_data) { + kfree(eve_data); return -ENOMEM; + } cb_data->eve_cb = cb_fun; cb_data->agent_data = data; diff --git a/drivers/soundwire/slave.c b/drivers/soundwire/slave.c index c1c1a2ac293a..28b0d7b6d780 100644 --- a/drivers/soundwire/slave.c +++ b/drivers/soundwire/slave.c @@ -12,6 +12,7 @@ static void sdw_slave_release(struct device *dev) { struct sdw_slave *slave = dev_to_sdw_dev(dev); + of_node_put(slave->dev.of_node); mutex_destroy(&slave->sdw_dev_lock); kfree(slave); } diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index d96c40e7c812..8540fcdf3b42 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -138,11 +138,15 @@ #define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8) #define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC) +#define ATMEL_QSPI_TIMEOUT 1000 /* ms */ + struct atmel_qspi_caps { bool has_qspick; bool has_ricr; }; +struct atmel_qspi_ops; + struct atmel_qspi { void __iomem *regs; void __iomem *mem; @@ -150,13 +154,22 @@ struct atmel_qspi { struct clk *qspick; struct platform_device *pdev; const struct atmel_qspi_caps *caps; + const struct atmel_qspi_ops *ops; resource_size_t mmap_size; u32 pending; + u32 irq_mask; u32 mr; u32 scr; struct completion cmd_completion; }; +struct atmel_qspi_ops { + int (*set_cfg)(struct atmel_qspi *aq, const struct spi_mem_op *op, + u32 *offset); + int (*transfer)(struct spi_mem *mem, const struct spi_mem_op *op, + u32 offset); +}; + struct atmel_qspi_mode { u8 cmd_buswidth; u8 addr_buswidth; @@ -183,7 +196,7 @@ static const char *atmel_qspi_reg_name(u32 offset, char *tmp, size_t sz) case QSPI_MR: return "MR"; case QSPI_RD: - return "MR"; + return "RD"; case QSPI_TD: return "TD"; case QSPI_SR: @@ -375,9 +388,9 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, * If the QSPI controller is set in regular SPI mode, set it in * Serial Memory Mode (SMM). 
*/ - if (aq->mr != QSPI_MR_SMM) { - atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); - aq->mr = QSPI_MR_SMM; + if (!(aq->mr & QSPI_MR_SMM)) { + aq->mr |= QSPI_MR_SMM; + atmel_qspi_write(aq->mr, aq, QSPI_MR); } /* Clear pending interrupts */ @@ -404,10 +417,67 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq, return 0; } +static int atmel_qspi_wait_for_completion(struct atmel_qspi *aq, u32 irq_mask) +{ + int err = 0; + u32 sr; + + /* Poll INSTRuction End status */ + sr = atmel_qspi_read(aq, QSPI_SR); + if ((sr & irq_mask) == irq_mask) + return 0; + + /* Wait for INSTRuction End interrupt */ + reinit_completion(&aq->cmd_completion); + aq->pending = sr & irq_mask; + aq->irq_mask = irq_mask; + atmel_qspi_write(irq_mask, aq, QSPI_IER); + if (!wait_for_completion_timeout(&aq->cmd_completion, + msecs_to_jiffies(ATMEL_QSPI_TIMEOUT))) + err = -ETIMEDOUT; + atmel_qspi_write(irq_mask, aq, QSPI_IDR); + + return err; +} + +static int atmel_qspi_transfer(struct spi_mem *mem, + const struct spi_mem_op *op, u32 offset) +{ + struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller); + + /* Skip to the final steps if there is no data */ + if (!op->data.nbytes) + return atmel_qspi_wait_for_completion(aq, + QSPI_SR_CMD_COMPLETED); + + /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ + (void)atmel_qspi_read(aq, QSPI_IFR); + + /* Send/Receive data */ + if (op->data.dir == SPI_MEM_DATA_IN) { + memcpy_fromio(op->data.buf.in, aq->mem + offset, + op->data.nbytes); + + /* Synchronize AHB and APB accesses again */ + rmb(); + } else { + memcpy_toio(aq->mem + offset, op->data.buf.out, + op->data.nbytes); + + /* Synchronize AHB and APB accesses again */ + wmb(); + } + + /* Release the chip-select */ + atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR); + + return atmel_qspi_wait_for_completion(aq, QSPI_SR_CMD_COMPLETED); +} + static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { - struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master); - u32 sr, offset; + struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller); + u32 offset; int err; /* @@ -416,46 +486,20 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) * when the flash memories overrun the controller's memory space. 
*/ if (op->addr.val + op->data.nbytes > aq->mmap_size) - return -ENOTSUPP; + return -EOPNOTSUPP; + + if (op->addr.nbytes > 4) + return -EOPNOTSUPP; err = pm_runtime_resume_and_get(&aq->pdev->dev); if (err < 0) return err; - err = atmel_qspi_set_cfg(aq, op, &offset); + err = aq->ops->set_cfg(aq, op, &offset); if (err) goto pm_runtime_put; - /* Skip to the final steps if there is no data */ - if (op->data.nbytes) { - /* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */ - (void)atmel_qspi_read(aq, QSPI_IFR); - - /* Send/Receive data */ - if (op->data.dir == SPI_MEM_DATA_IN) - memcpy_fromio(op->data.buf.in, aq->mem + offset, - op->data.nbytes); - else - memcpy_toio(aq->mem + offset, op->data.buf.out, - op->data.nbytes); - - /* Release the chip-select */ - atmel_qspi_write(QSPI_CR_LASTXFER, aq, QSPI_CR); - } - - /* Poll INSTRuction End status */ - sr = atmel_qspi_read(aq, QSPI_SR); - if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) - goto pm_runtime_put; - - /* Wait for INSTRuction End interrupt */ - reinit_completion(&aq->cmd_completion); - aq->pending = sr & QSPI_SR_CMD_COMPLETED; - atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IER); - if (!wait_for_completion_timeout(&aq->cmd_completion, - msecs_to_jiffies(1000))) - err = -ETIMEDOUT; - atmel_qspi_write(QSPI_SR_CMD_COMPLETED, aq, QSPI_IDR); + err = aq->ops->transfer(mem, op, offset); pm_runtime_put: pm_runtime_mark_last_busy(&aq->pdev->dev); @@ -476,7 +520,7 @@ static const struct spi_controller_mem_ops atmel_qspi_mem_ops = { static int atmel_qspi_setup(struct spi_device *spi) { - struct spi_controller *ctrl = spi->master; + struct spi_controller *ctrl = spi->controller; struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); unsigned long src_rate; u32 scbr; @@ -501,7 +545,42 @@ static int atmel_qspi_setup(struct spi_device *spi) if (ret < 0) return ret; - aq->scr = QSPI_SCR_SCBR(scbr); + aq->scr &= ~QSPI_SCR_SCBR_MASK; + aq->scr |= QSPI_SCR_SCBR(scbr); + atmel_qspi_write(aq->scr, aq, QSPI_SCR); + + pm_runtime_mark_last_busy(ctrl->dev.parent); + pm_runtime_put_autosuspend(ctrl->dev.parent); + + return 0; +} + +static int atmel_qspi_set_cs_timing(struct spi_device *spi) +{ + struct spi_controller *ctrl = spi->controller; + struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); + unsigned long clk_rate; + u32 cs_setup; + int delay; + int ret; + + delay = spi_delay_to_ns(&spi->cs_setup, NULL); + if (delay <= 0) + return delay; + + clk_rate = clk_get_rate(aq->pclk); + if (!clk_rate) + return -EINVAL; + + cs_setup = DIV_ROUND_UP((delay * DIV_ROUND_UP(clk_rate, 1000000)), + 1000); + + ret = pm_runtime_resume_and_get(ctrl->dev.parent); + if (ret < 0) + return ret; + + aq->scr &= ~QSPI_SCR_DLYBS_MASK; + aq->scr |= QSPI_SCR_DLYBS(cs_setup); atmel_qspi_write(aq->scr, aq, QSPI_SCR); pm_runtime_mark_last_busy(ctrl->dev.parent); @@ -516,8 +595,8 @@ static void atmel_qspi_init(struct atmel_qspi *aq) atmel_qspi_write(QSPI_CR_SWRST, aq, QSPI_CR); /* Set the QSPI controller by default in Serial Memory Mode */ - atmel_qspi_write(QSPI_MR_SMM, aq, QSPI_MR); - aq->mr = QSPI_MR_SMM; + aq->mr |= QSPI_MR_SMM; + atmel_qspi_write(aq->mr, aq, QSPI_MR); /* Enable the QSPI controller */ atmel_qspi_write(QSPI_CR_QSPIEN, aq, QSPI_CR); @@ -536,12 +615,17 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) return IRQ_NONE; aq->pending |= pending; - if ((aq->pending & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED) + if ((aq->pending & aq->irq_mask) == aq->irq_mask) complete(&aq->cmd_completion); return IRQ_HANDLED; } +static 
const struct atmel_qspi_ops atmel_qspi_ops = { + .set_cfg = atmel_qspi_set_cfg, + .transfer = atmel_qspi_transfer, +}; + static int atmel_qspi_probe(struct platform_device *pdev) { struct spi_controller *ctrl; @@ -549,12 +633,13 @@ static int atmel_qspi_probe(struct platform_device *pdev) struct resource *res; int irq, err = 0; - ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*aq)); + ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*aq)); if (!ctrl) return -ENOMEM; ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; ctrl->setup = atmel_qspi_setup; + ctrl->set_cs_timing = atmel_qspi_set_cs_timing; ctrl->bus_num = -1; ctrl->mem_ops = &atmel_qspi_mem_ops; ctrl->num_chipselect = 1; @@ -565,6 +650,7 @@ static int atmel_qspi_probe(struct platform_device *pdev) init_completion(&aq->cmd_completion); aq->pdev = pdev; + aq->ops = &atmel_qspi_ops; /* Map the registers */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base"); diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c index b90571396a60..5015c2f6fd9f 100644 --- a/drivers/spi/spi-aspeed-smc.c +++ b/drivers/spi/spi-aspeed-smc.c @@ -239,7 +239,7 @@ static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip, ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, offset, op->cmd.opcode); if (ret < 0) - return ret; + goto stop_user; if (op->dummy.buswidth && op->dummy.nbytes) { for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++) @@ -249,8 +249,9 @@ static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip, aspeed_spi_set_io_mode(chip, io_mode); aspeed_spi_read_from_ahb(buf, chip->ahb_base, len); +stop_user: aspeed_spi_stop_user(chip); - return 0; + return ret; } static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip, @@ -261,10 +262,11 @@ static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip, aspeed_spi_start_user(chip); ret = aspeed_spi_send_cmd_addr(chip, op->addr.nbytes, op->addr.val, op->cmd.opcode); if (ret < 0) - return ret; + goto stop_user; aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes); +stop_user: aspeed_spi_stop_user(chip); - return 0; + return ret; } /* support for 1-1-1, 1-1-2 or 1-1-4 */ diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index b371e4eb41ec..02c5f00c8a57 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1509,6 +1509,12 @@ static int cqspi_request_mmap_dma(struct cqspi_st *cqspi) int ret = PTR_ERR(cqspi->rx_chan); cqspi->rx_chan = NULL; + if (ret == -ENODEV) { + /* DMA support is not mandatory */ + dev_info(&cqspi->pdev->dev, "No Rx DMA available\n"); + return 0; + } + return dev_err_probe(&cqspi->pdev->dev, ret, "No Rx DMA available\n"); } init_completion(&cqspi->rx_dma_complete); @@ -1769,10 +1775,9 @@ static int cqspi_remove(struct platform_device *pdev) static int cqspi_suspend(struct device *dev) { struct cqspi_st *cqspi = dev_get_drvdata(dev); - struct spi_master *master = dev_get_drvdata(dev); int ret; - ret = spi_master_suspend(master); + ret = spi_master_suspend(cqspi->master); cqspi_controller_enable(cqspi, 0); clk_disable_unprepare(cqspi->clk); @@ -1783,7 +1788,6 @@ static int cqspi_suspend(struct device *dev) static int cqspi_resume(struct device *dev) { struct cqspi_st *cqspi = dev_get_drvdata(dev); - struct spi_master *master = dev_get_drvdata(dev); clk_prepare_enable(cqspi->clk); cqspi_wait_idle(cqspi); @@ -1792,7 +1796,7 @@ static int cqspi_resume(struct device *dev) cqspi->current_cs = -1; cqspi->sclk = 0; 
- return spi_master_resume(master); + return spi_master_resume(cqspi->master); } static DEFINE_SIMPLE_DEV_PM_OPS(cqspi_dev_pm_ops, cqspi_suspend, cqspi_resume); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 01930b52c4fb..5374c6d44519 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ // // Copyright 2013 Freescale Semiconductor, Inc. -// Copyright 2020 NXP +// Copyright 2020-2025 NXP // // Freescale DSPI driver // This file contains a driver for the Freescale DSPI @@ -61,6 +61,7 @@ #define SPI_SR_TFIWF BIT(18) #define SPI_SR_RFDF BIT(17) #define SPI_SR_CMDFFF BIT(16) +#define SPI_SR_TXRXS BIT(30) #define SPI_SR_CLEAR (SPI_SR_TCFQF | \ SPI_SR_TFUF | SPI_SR_TFFF | \ SPI_SR_CMDTCF | SPI_SR_SPEF | \ @@ -907,9 +908,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, struct spi_device *spi = message->spi; struct spi_transfer *transfer; int status = 0; + u32 val = 0; + bool cs_change = false; message->actual_length = 0; + /* Put DSPI in running mode if halted. */ + regmap_read(dspi->regmap, SPI_MCR, &val); + if (val & SPI_MCR_HALT) { + regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, 0); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + !(val & SPI_SR_TXRXS)) + ; + } + list_for_each_entry(transfer, &message->transfers, transfer_list) { dspi->cur_transfer = transfer; dspi->cur_msg = message; @@ -934,6 +946,7 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, dspi->tx_cmd |= SPI_PUSHR_CMD_CONT; } + cs_change = transfer->cs_change; dspi->tx = transfer->tx_buf; dspi->rx = transfer->rx_buf; dspi->len = transfer->len; @@ -943,6 +956,8 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF, SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF); + regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); + spi_take_timestamp_pre(dspi->ctlr, dspi->cur_transfer, dspi->progress, !dspi->irq); @@ -966,6 +981,15 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, spi_transfer_delay_exec(transfer); } + if (status || !cs_change) { + /* Put DSPI in stop mode */ + regmap_update_bits(dspi->regmap, SPI_MCR, + SPI_MCR_HALT, SPI_MCR_HALT); + while (regmap_read(dspi->regmap, SPI_SR, &val) >= 0 && + val & SPI_SR_TXRXS) + ; + } + message->status = status; spi_finalize_current_message(ctlr); @@ -1128,6 +1152,20 @@ static int dspi_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume); +static const struct regmap_range dspi_yes_ranges[] = { + regmap_reg_range(SPI_MCR, SPI_MCR), + regmap_reg_range(SPI_TCR, SPI_CTAR(3)), + regmap_reg_range(SPI_SR, SPI_TXFR3), + regmap_reg_range(SPI_RXFR0, SPI_RXFR3), + regmap_reg_range(SPI_CTARE(0), SPI_CTARE(3)), + regmap_reg_range(SPI_SREX, SPI_SREX), +}; + +static const struct regmap_access_table dspi_access_table = { + .yes_ranges = dspi_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(dspi_yes_ranges), +}; + static const struct regmap_range dspi_volatile_ranges[] = { regmap_reg_range(SPI_MCR, SPI_TCR), regmap_reg_range(SPI_SR, SPI_SR), @@ -1145,6 +1183,8 @@ static const struct regmap_config dspi_regmap_config = { .reg_stride = 4, .max_register = 0x88, .volatile_table = &dspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, }; static const struct regmap_range dspi_xspi_volatile_ranges[] = { @@ -1166,6 +1206,8 @@ static const struct regmap_config dspi_xspi_regmap_config[] = { .reg_stride = 4, .max_register = 0x13c, .volatile_table = 
&dspi_xspi_volatile_table, + .rd_table = &dspi_access_table, + .wr_table = &dspi_access_table, }, { .name = "pushr", @@ -1188,6 +1230,8 @@ static int dspi_init(struct fsl_dspi *dspi) if (!spi_controller_is_slave(dspi->ctlr)) mcr |= SPI_MCR_MASTER; + mcr |= SPI_MCR_HALT; + regmap_write(dspi->regmap, SPI_MCR, mcr); regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR); diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 7d016464037c..246d13323882 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -870,7 +870,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev) goto out_controller_put; } - ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, 0, + ret = devm_request_irq(&pdev->dev, irq, fsl_lpspi_isr, IRQF_NO_AUTOEN, dev_name(&pdev->dev), fsl_lpspi); if (ret) { dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret); @@ -907,14 +907,10 @@ static int fsl_lpspi_probe(struct platform_device *pdev) ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller); if (ret == -EPROBE_DEFER) goto out_pm_get; - if (ret < 0) - dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret); - else - /* - * disable LPSPI module IRQ when enable DMA mode successfully, - * to prevent the unexpected LPSPI module IRQ events. - */ - disable_irq(irq); + if (ret < 0) { + dev_warn(&pdev->dev, "dma setup error %d, use pio\n", ret); + enable_irq(irq); + } ret = devm_spi_register_controller(&pdev->dev, controller); if (ret < 0) { diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index df73a2c7120c..13a6ebef0189 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1605,10 +1605,13 @@ static int spi_imx_transfer_one(struct spi_controller *controller, struct spi_device *spi, struct spi_transfer *transfer) { + int ret; struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller); unsigned long hz_per_byte, byte_limit; - spi_imx_setupxfer(spi, transfer); + ret = spi_imx_setupxfer(spi, transfer); + if (ret < 0) + return ret; transfer->effective_speed_hz = spi_imx->spi_bus_clk; /* flush rxfifo before transfer */ diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c index dd7de8fa37d0..ab29ae463f67 100644 --- a/drivers/spi/spi-loopback-test.c +++ b/drivers/spi/spi-loopback-test.c @@ -410,7 +410,7 @@ MODULE_LICENSE("GPL"); static void spi_test_print_hex_dump(char *pre, const void *ptr, size_t len) { /* limit the hex_dump */ - if (len < 1024) { + if (len <= 1024) { print_hex_dump(KERN_INFO, pre, DUMP_PREFIX_OFFSET, 16, 1, ptr, len, 0); diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c index 7b64e64c65cf..2badf6535b30 100644 --- a/drivers/spi/spi-mpc52xx.c +++ b/drivers/spi/spi-mpc52xx.c @@ -519,6 +519,7 @@ static int mpc52xx_spi_remove(struct platform_device *op) struct mpc52xx_spi *ms = spi_master_get_devdata(master); int i; + cancel_work_sync(&ms->work); free_irq(ms->irq0, ms); free_irq(ms->irq1, ms); diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 55178579f3c6..b951bd5efdcd 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -39,6 +39,7 @@ #include #include #include +#include #define DRIVER_NAME "mxs-spi" @@ -252,7 +253,7 @@ static int mxs_spi_txrx_dma(struct mxs_spi *spi, desc = dmaengine_prep_slave_sg(ssp->dmach, &dma_xfer[sg_count].sg, 1, (flags & TXRX_WRITE) ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, - DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END); if (!desc) { dev_err(ssp->dev, diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c index 24ec1c83f379..ec0904faf3a1 100644 --- a/drivers/spi/spi-rpc-if.c +++ b/drivers/spi/spi-rpc-if.c @@ -58,7 +58,7 @@ static void rpcif_spi_mem_prepare(struct spi_device *spi_dev, rpc_op.data.dir = RPCIF_NO_DATA; } - rpcif_prepare(rpc, &rpc_op, offs, len); + rpcif_prepare(rpc->dev, &rpc_op, offs, len); } static bool rpcif_spi_mem_supports_op(struct spi_mem *mem, @@ -86,7 +86,7 @@ static ssize_t rpcif_spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, rpcif_spi_mem_prepare(desc->mem->spi, &desc->info.op_tmpl, &offs, &len); - return rpcif_dirmap_read(rpc, offs, len, buf); + return rpcif_dirmap_read(rpc->dev, offs, len, buf); } static int rpcif_spi_mem_dirmap_create(struct spi_mem_dirmap_desc *desc) @@ -117,7 +117,7 @@ static int rpcif_spi_mem_exec_op(struct spi_mem *mem, rpcif_spi_mem_prepare(mem->spi, op, NULL, NULL); - return rpcif_manual_xfer(rpc); + return rpcif_manual_xfer(rpc->dev); } static const struct spi_controller_mem_ops rpcif_spi_mem_ops = { @@ -147,7 +147,7 @@ static int rpcif_spi_probe(struct platform_device *pdev) ctlr->dev.of_node = parent->of_node; - rpcif_enable_rpm(rpc); + pm_runtime_enable(rpc->dev); ctlr->num_chipselect = 1; ctlr->mem_ops = &rpcif_spi_mem_ops; @@ -156,7 +156,7 @@ static int rpcif_spi_probe(struct platform_device *pdev) ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_QUAD | SPI_RX_QUAD; ctlr->flags = SPI_CONTROLLER_HALF_DUPLEX; - error = rpcif_hw_init(rpc, false); + error = rpcif_hw_init(rpc->dev, false); if (error) goto out_disable_rpm; @@ -169,7 +169,7 @@ static int rpcif_spi_probe(struct platform_device *pdev) return 0; out_disable_rpm: - rpcif_disable_rpm(rpc); + pm_runtime_disable(rpc->dev); return error; } @@ -179,7 +179,7 @@ static int rpcif_spi_remove(struct platform_device *pdev) struct rpcif *rpc = spi_controller_get_devdata(ctlr); spi_unregister_controller(ctlr); - rpcif_disable_rpm(rpc); + pm_runtime_disable(rpc->dev); return 0; } diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c index 6000d0761206..6937f5c4d868 100644 --- a/drivers/spi/spi-sun4i.c +++ b/drivers/spi/spi-sun4i.c @@ -263,6 +263,9 @@ static int sun4i_spi_transfer_one(struct spi_master *master, else reg |= SUN4I_CTL_DHB; + /* Now that the settings are correct, enable the interface */ + reg |= SUN4I_CTL_ENABLE; + sun4i_spi_write(sspi, SUN4I_CTL_REG, reg); /* Ensure that we have a parent clock fast enough */ @@ -403,7 +406,7 @@ static int sun4i_spi_runtime_resume(struct device *dev) } sun4i_spi_write(sspi, SUN4I_CTL_REG, - SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP); + SUN4I_CTL_MASTER | SUN4I_CTL_TP); return 0; diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index 06c54d49076a..442d42130ec8 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ b/drivers/spi/spi-tegra210-quad.c @@ -340,7 +340,7 @@ tegra_qspi_fill_tx_fifo_from_client_txbuf(struct tegra_qspi *tqspi, struct spi_t for (count = 0; count < max_n_32bit; count++) { u32 x = 0; - for (i = 0; len && (i < bytes_per_word); i++, len--) + for (i = 0; len && (i < min(4, bytes_per_word)); i++, len--) x |= (u32)(*tx_buf++) << (i * 8); tegra_qspi_writel(tqspi, x, QSPI_TX_FIFO); } @@ -1110,9 +1110,9 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, (&tqspi->xfer_completion, QSPI_DMA_TIMEOUT); - if (WARN_ON(ret == 0)) { - dev_err(tqspi->dev, "QSPI Transfer 
failed with timeout: %d\n", - ret); + if (WARN_ON_ONCE(ret == 0)) { + dev_err_ratelimited(tqspi->dev, + "QSPI Transfer failed with timeout\n"); if (tqspi->is_curr_dma_xfer && (tqspi->cur_direction & DATA_DIR_TX)) dmaengine_terminate_all diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c index 78f31b61a2aa..77ea6b522348 100644 --- a/drivers/spi/spi-zynq-qspi.c +++ b/drivers/spi/spi-zynq-qspi.c @@ -379,12 +379,21 @@ static int zynq_qspi_setup_op(struct spi_device *spi) { struct spi_controller *ctlr = spi->master; struct zynq_qspi *qspi = spi_controller_get_devdata(ctlr); + int ret; if (ctlr->busy) return -EBUSY; - clk_enable(qspi->refclk); - clk_enable(qspi->pclk); + ret = clk_enable(qspi->refclk); + if (ret) + return ret; + + ret = clk_enable(qspi->pclk); + if (ret) { + clk_disable(qspi->refclk); + return ret; + } + zynq_qspi_write(qspi, ZYNQ_QSPI_ENABLE_OFFSET, ZYNQ_QSPI_ENABLE_ENABLE_MASK); diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 3b56d5e7080e..fde7c3810359 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -698,7 +698,6 @@ static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi) static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) { struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_id; - irqreturn_t ret = IRQ_NONE; u32 status, mask, dma_status = 0; status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST); @@ -713,27 +712,24 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id) dma_status); } - if (mask & GQSPI_ISR_TXNOT_FULL_MASK) { - zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL); - ret = IRQ_HANDLED; - } + if (!mask && !dma_status) + return IRQ_NONE; - if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) { + if (mask & GQSPI_ISR_TXNOT_FULL_MASK) + zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL); + + if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) zynqmp_process_dma_irq(xqspi); - ret = IRQ_HANDLED; - } else if (!(mask & GQSPI_IER_RXEMPTY_MASK) && - (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) { + else if (!(mask & GQSPI_IER_RXEMPTY_MASK) && + (mask & GQSPI_IER_GENFIFOEMPTY_MASK)) zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL); - ret = IRQ_HANDLED; - } if (xqspi->bytes_to_receive == 0 && xqspi->bytes_to_transfer == 0 && ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) { zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK); complete(&xqspi->data_completion); - ret = IRQ_HANDLED; } - return ret; + return IRQ_HANDLED; } /** @@ -1219,6 +1215,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev) clk_dis_all: pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); clk_disable_unprepare(xqspi->refclk); @@ -1249,6 +1246,7 @@ static int zynqmp_qspi_remove(struct platform_device *pdev) zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0); pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); clk_disable_unprepare(xqspi->refclk); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 50fe5aa450f8..df774fb148d8 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -425,6 +425,16 @@ static int spi_probe(struct device *dev) spi->irq = 0; } + if (has_acpi_companion(dev) && spi->irq < 0) { + struct acpi_device *adev = to_acpi_device_node(dev->fwnode); + + spi->irq = acpi_dev_gpio_irq_get(adev, 0); + if (spi->irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (spi->irq < 0) + spi->irq = 
0; + } + ret = dev_pm_domain_attach(dev, true); if (ret) return ret; @@ -2697,9 +2707,6 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, sizeof(spi->modalias)); - if (spi->irq < 0) - spi->irq = acpi_dev_gpio_irq_get(adev, 0); - acpi_device_set_enumerated(adev); adev->power.flags.ignore_parent = true; diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c index 0a85ea667a1b..3369892eacf4 100644 --- a/drivers/staging/axis-fifo/axis-fifo.c +++ b/drivers/staging/axis-fifo/axis-fifo.c @@ -400,16 +400,14 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET); if (!bytes_available) { - dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n"); - reset_ip_core(fifo); + dev_err(fifo->dt_device, "received a packet of length 0\n"); ret = -EIO; goto end_unlock; } if (bytes_available > len) { - dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n", + dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n", bytes_available, len); - reset_ip_core(fifo); ret = -EINVAL; goto end_unlock; } @@ -418,8 +416,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, /* this probably can't happen unless IP * registers were previously mishandled */ - dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n"); - reset_ip_core(fifo); + dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n"); ret = -EIO; goto end_unlock; } @@ -440,7 +437,6 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf, if (copy_to_user(buf + copied * sizeof(u32), tmp_buf, copy * sizeof(u32))) { - reset_ip_core(fifo); ret = -EFAULT; goto end_unlock; } @@ -549,7 +545,6 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf, if (copy_from_user(tmp_buf, buf + copied * sizeof(u32), copy * sizeof(u32))) { - reset_ip_core(fifo); ret = -EFAULT; goto end_unlock; } @@ -782,9 +777,6 @@ static int axis_fifo_parse_dt(struct axis_fifo *fifo) goto end; } - /* IP sets TDFV to fifo depth - 4 so we will do the same */ - fifo->tx_fifo_depth -= 4; - ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo); if (ret) { dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n"); diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c index 6c14d7bcdd67..081b17f49863 100644 --- a/drivers/staging/iio/adc/ad7816.c +++ b/drivers/staging/iio/adc/ad7816.c @@ -136,7 +136,7 @@ static ssize_t ad7816_store_mode(struct device *dev, struct iio_dev *indio_dev = dev_to_iio_dev(dev); struct ad7816_chip_info *chip = iio_priv(indio_dev); - if (strcmp(buf, "full")) { + if (strcmp(buf, "full") == 0) { gpiod_set_value(chip->rdwr_pin, 1); chip->mode = AD7816_FULL; } else { diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c index d58d99d8375e..b40609b92980 100644 --- a/drivers/staging/iio/frequency/ad9832.c +++ b/drivers/staging/iio/frequency/ad9832.c @@ -158,7 +158,7 @@ static int ad9832_write_frequency(struct ad9832_state *st, static int ad9832_write_phase(struct ad9832_state *st, unsigned long addr, unsigned long phase) { - if (phase > BIT(AD9832_PHASE_BITS)) + if (phase >= BIT(AD9832_PHASE_BITS)) return -EINVAL; st->phase_data[0] = cpu_to_be16((AD9832_CMD_PHA8BITSW << 
CMD_SHIFT) | diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c index 89abb0709260..f66688ee7f4a 100644 --- a/drivers/staging/iio/frequency/ad9834.c +++ b/drivers/staging/iio/frequency/ad9834.c @@ -131,7 +131,7 @@ static int ad9834_write_frequency(struct ad9834_state *st, static int ad9834_write_phase(struct ad9834_state *st, unsigned long addr, unsigned long phase) { - if (phase > BIT(AD9834_PHASE_BITS)) + if (phase >= BIT(AD9834_PHASE_BITS)) return -EINVAL; st->data = cpu_to_be16(addr | phase); diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c index 67915d76a87f..34113bea31d5 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_params.c +++ b/drivers/staging/media/atomisp/pci/sh_css_params.c @@ -4137,6 +4137,8 @@ ia_css_3a_statistics_allocate(const struct ia_css_3a_grid_info *grid) goto err; /* No weighted histogram, no structure, treat the histogram data as a byte dump in a byte array */ me->rgby_data = kvmalloc(sizeof_hmem(HMEM0_ID), GFP_KERNEL); + if (!me->rgby_data) + goto err; IA_CSS_LEAVE("return=%p", me); return me; diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c index 59f1eb7b62bc..3771bc410dff 100644 --- a/drivers/staging/media/imx/imx-media-of.c +++ b/drivers/staging/media/imx/imx-media-of.c @@ -55,22 +55,18 @@ int imx_media_add_of_subdevs(struct imx_media_dev *imxmd, break; ret = imx_media_of_add_csi(imxmd, csi_np); + of_node_put(csi_np); if (ret) { /* unavailable or already added is not an error */ if (ret == -ENODEV || ret == -EEXIST) { - of_node_put(csi_np); continue; } /* other error, can't continue */ - goto err_out; + return ret; } } return 0; - -err_out: - of_node_put(csi_np); - return ret; } EXPORT_SYMBOL_GPL(imx_media_add_of_subdevs); diff --git a/drivers/staging/media/max96712/max96712.c b/drivers/staging/media/max96712/max96712.c index 99b333b68198..f5805dc70ece 100644 --- a/drivers/staging/media/max96712/max96712.c +++ b/drivers/staging/media/max96712/max96712.c @@ -376,7 +376,6 @@ static int max96712_probe(struct i2c_client *client) return -ENOMEM; priv->client = client; - i2c_set_clientdata(client, priv); priv->regmap = devm_regmap_init_i2c(client, &max96712_i2c_regmap); if (IS_ERR(priv->regmap)) @@ -409,7 +408,8 @@ static int max96712_probe(struct i2c_client *client) static void max96712_remove(struct i2c_client *client) { - struct max96712_priv *priv = i2c_get_clientdata(client); + struct v4l2_subdev *sd = i2c_get_clientdata(client); + struct max96712_priv *priv = container_of(sd, struct max96712_priv, sd); v4l2_async_unregister_subdev(&priv->sd); diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig index f23e29b679fb..14afcbbd6104 100644 --- a/drivers/staging/rtl8723bs/Kconfig +++ b/drivers/staging/rtl8723bs/Kconfig @@ -5,6 +5,7 @@ config RTL8723BS depends on m select CFG80211_WEXT select CRYPTO + select CRYPTO_LIB_AES select CRYPTO_LIB_ARC4 help This option enables support for RTL8723BS SDIO drivers, such as diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 705c5e283c27..456a9508fb91 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -115,11 +115,6 @@ struct vchiq_arm_state { int first_connect; }; -struct vchiq_2835_state { - int inited; - struct vchiq_arm_state arm_state; -}; - struct 
vchiq_pagelist_info { struct pagelist *pagelist; size_t pagelist_buffer_size; @@ -574,29 +569,21 @@ vchiq_arm_init_state(struct vchiq_state *state, int vchiq_platform_init_state(struct vchiq_state *state) { - struct vchiq_2835_state *platform_state; + struct vchiq_arm_state *platform_state; - state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL); - if (!state->platform_state) + platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL); + if (!platform_state) return -ENOMEM; - platform_state = (struct vchiq_2835_state *)state->platform_state; - - platform_state->inited = 1; - vchiq_arm_init_state(state, &platform_state->arm_state); + vchiq_arm_init_state(state, platform_state); + state->platform_state = (struct opaque_platform_state *)platform_state; return 0; } static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state) { - struct vchiq_2835_state *platform_state; - - platform_state = (struct vchiq_2835_state *)state->platform_state; - - WARN_ON_ONCE(!platform_state->inited); - - return &platform_state->arm_state; + return (struct vchiq_arm_state *)state->platform_state; } void diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 07e196b44b91..04d40e76772b 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4314,8 +4314,8 @@ int iscsit_close_connection( spin_unlock(&iscsit_global->ts_bitmap_lock); iscsit_stop_timers_for_cmds(conn); - iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); + iscsit_stop_nopin_response_timer(conn); if (conn->conn_transport->iscsit_wait_conn) conn->conn_transport->iscsit_wait_conn(conn); diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c index 322a543b8c27..d0f397c90242 100644 --- a/drivers/tee/optee/supp.c +++ b/drivers/tee/optee/supp.c @@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, struct optee *optee = tee_get_drvdata(ctx->teedev); struct optee_supp *supp = &optee->supp; struct optee_supp_req *req; - bool interruptable; u32 ret; /* @@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, /* * Wait for supplicant to process and return result, once we've * returned from wait_for_completion(&req->c) successfully we have - * exclusive access again. + * exclusive access again. Allow the wait to be killable such that + * the wait doesn't turn into an indefinite state if the supplicant + * gets hung for some reason. */ - while (wait_for_completion_interruptible(&req->c)) { + if (wait_for_completion_killable(&req->c)) { mutex_lock(&supp->mutex); - interruptable = !supp->ctx; - if (interruptable) { - /* - * There's no supplicant available and since the - * supp->mutex currently is held none can - * become available until the mutex released - * again. - * - * Interrupting an RPC to supplicant is only - * allowed as a way of slightly improving the user - * experience in case the supplicant hasn't been - * started yet. During normal operation the supplicant - * will serve all requests in a timely manner and - * interrupting then wouldn't make sense. 
- */ - if (req->in_queue) { - list_del(&req->link); - req->in_queue = false; - } + if (req->in_queue) { + list_del(&req->link); + req->in_queue = false; } mutex_unlock(&supp->mutex); - - if (interruptable) { - req->ret = TEEC_ERROR_COMMUNICATION; - break; - } + req->ret = TEEC_ERROR_COMMUNICATION; } ret = req->ret; diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index 9f8b438fcf8f..bac0f52a361b 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -57,8 +57,6 @@ struct time_in_idle { * @max_level: maximum cooling level. One less than total number of valid * cpufreq frequencies. * @em: Reference on the Energy Model of the device - * @cdev: thermal_cooling_device pointer to keep track of the - * registered cooling device. * @policy: cpufreq policy. * @cooling_ops: cpufreq callbacks to thermal cooling device ops * @idle_time: idle time stats diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index db8a6f63657d..9280241518ee 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -145,7 +145,7 @@ static ssize_t current_uuid_show(struct device *dev, struct int3400_thermal_priv *priv = dev_get_drvdata(dev); int i, length = 0; - if (priv->current_uuid_index > 0) + if (priv->current_uuid_index >= 0) return sprintf(buf, "%s\n", int3400_thermal_uuids[priv->current_uuid_index]); diff --git a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c index 43fa351e2b9e..b7fdf25bfd23 100644 --- a/drivers/thermal/intel/int340x_thermal/int3402_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3402_thermal.c @@ -45,6 +45,9 @@ static int int3402_thermal_probe(struct platform_device *pdev) struct int3402_thermal_data *d; int ret; + if (!adev) + return -ENODEV; + if (!acpi_has_method(adev->handle, "_TMP")) return -ENODEV; diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index d111e218f362..b33cb1d880b7 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -19,6 +19,7 @@ #define SITES_MAX 16 #define TMR_DISABLE 0x0 #define TMR_ME 0x80000000 +#define TMR_CMD BIT(29) #define TMR_ALPF 0x0c000000 #define TMR_ALPF_V2 0x03000000 #define TMTMIR_DEFAULT 0x0000000f @@ -345,6 +346,12 @@ static int __maybe_unused qoriq_tmu_suspend(struct device *dev) if (ret) return ret; + if (data->ver > TMU_VER1) { + ret = regmap_set_bits(data->regmap, REGS_TMR, TMR_CMD); + if (ret) + return ret; + } + clk_disable_unprepare(data->clk); return 0; @@ -359,6 +366,12 @@ static int __maybe_unused qoriq_tmu_resume(struct device *dev) if (ret) return ret; + if (data->ver > TMU_VER1) { + ret = regmap_clear_bits(data->regmap, REGS_TMR, TMR_CMD); + if (ret) + return ret; + } + /* Enable monitoring */ return regmap_update_bits(data->regmap, REGS_TMR, TMR_ME, TMR_ME); } diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 617ea9d3c66c..373eee1e6dc3 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -560,6 +560,7 @@ static const struct tsadc_table rk3328_code_table[] = { {296, -40000}, {304, -35000}, {313, -30000}, + {322, -25000}, {331, -20000}, {340, -15000}, {349, -10000}, diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index ebb36b2c72d5..ba6f44f8b262 100644 --- a/drivers/thermal/thermal_core.c 
+++ b/drivers/thermal/thermal_core.c @@ -1271,6 +1271,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t thermal_zone_destroy_device_groups(tz); goto remove_id; } + thermal_zone_device_init(tz); result = device_register(&tz->device); if (result) goto release_device; @@ -1313,7 +1314,6 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t INIT_DELAYED_WORK(&tz->poll_queue, thermal_zone_device_check); - thermal_zone_device_init(tz); /* Update the new thermal zone and mark it as already updated. */ if (atomic_cmpxchg(&tz->need_update, 1, 0)) thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED); diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c index 07476147559a..95939df314d1 100644 --- a/drivers/thermal/thermal_of.c +++ b/drivers/thermal/thermal_of.c @@ -315,6 +315,7 @@ static struct device_node *of_thermal_zone_find(struct device_node *sensor, int goto out; } + of_node_put(sensor_specs.np); if ((sensor == sensor_specs.np) && id == (sensor_specs.args_count ? sensor_specs.args[0] : 0)) { pr_debug("sensor %pOFn id=%d belongs to %pOFn\n", sensor, id, child); diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 288aaa05d007..56a9222b439a 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -1479,6 +1479,18 @@ static struct pci_device_id nhi_ids[] = { .driver_data = (kernel_ulong_t)&icl_nhi_ops }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1), .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) }, diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h index 0f029ce75882..16744f25a9a0 100644 --- a/drivers/thunderbolt/nhi.h +++ b/drivers/thunderbolt/nhi.h @@ -90,6 +90,12 @@ extern const struct tb_nhi_ops icl_nhi_ops; #define PCI_DEVICE_ID_INTEL_TGL_H_NHI1 0x9a21 #define PCI_DEVICE_ID_INTEL_RPL_NHI0 0xa73e #define PCI_DEVICE_ID_INTEL_RPL_NHI1 0xa76d +#define PCI_DEVICE_ID_INTEL_LNL_NHI0 0xa833 +#define PCI_DEVICE_ID_INTEL_LNL_NHI1 0xa834 +#define PCI_DEVICE_ID_INTEL_PTL_M_NHI0 0xe333 +#define PCI_DEVICE_ID_INTEL_PTL_M_NHI1 0xe334 +#define PCI_DEVICE_ID_INTEL_PTL_P_NHI0 0xe433 +#define PCI_DEVICE_ID_INTEL_PTL_P_NHI1 0xe434 #define PCI_CLASS_SERIAL_USB_USB4 0x0c0340 diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index edbd92435b41..d2038337ea03 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -89,15 +89,18 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt) if (ret) goto err_nvm; - ret = tb_nvm_add_non_active(nvm, nvm_write); - if (ret) - goto err_nvm; + if (!rt->no_nvm_upgrade) { + ret = tb_nvm_add_non_active(nvm, nvm_write); + if (ret) + goto err_nvm; + } rt->nvm = nvm; return 0; err_nvm: dev_dbg(&rt->dev, "NVM upgrade disabled\n"); + 
rt->no_nvm_upgrade = true; if (!IS_ERR(nvm)) tb_nvm_free(nvm); @@ -177,8 +180,6 @@ static ssize_t nvm_authenticate_show(struct device *dev, if (!rt->nvm) ret = -EAGAIN; - else if (rt->no_nvm_upgrade) - ret = -EOPNOTSUPP; else ret = sysfs_emit(buf, "%#x\n", rt->auth_status); @@ -304,6 +305,19 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(vendor); +static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr, + int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct tb_retimer *rt = tb_to_retimer(dev); + + if (attr == &dev_attr_nvm_authenticate.attr || + attr == &dev_attr_nvm_version.attr) + return rt->no_nvm_upgrade ? 0 : attr->mode; + + return attr->mode; +} + static struct attribute *retimer_attrs[] = { &dev_attr_device.attr, &dev_attr_nvm_authenticate.attr, @@ -313,6 +327,7 @@ static struct attribute *retimer_attrs[] = { }; static const struct attribute_group retimer_group = { + .is_visible = retimer_is_visible, .attrs = retimer_attrs, }; diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index c5e4fa478e64..0668e1645bc5 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -640,11 +640,15 @@ static void tb_scan_port(struct tb_port *port) goto out_rpm_put; } - tb_retimer_scan(port, true); - sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, tb_downstream_route(port)); if (IS_ERR(sw)) { + /* + * Make the downstream retimers available even if there + * is no router connected. + */ + tb_retimer_scan(port, true); + /* * If there is an error accessing the connected switch * it may be connected to another domain. Also we allow @@ -704,6 +708,14 @@ static void tb_scan_port(struct tb_port *port) tb_switch_lane_bonding_enable(sw); /* Set the link configured */ tb_switch_configure_link(sw); + /* + * Scan for downstream retimers. We only scan them after the + * router has been enumerated to avoid issues with certain + * Pluggable devices that expect the host to enumerate them + * within certain timeout. + */ + tb_retimer_scan(port, true); + /* * CL0s and CL1 are enabled and supported together. * Silently ignore CLx enabling in case CLx is not supported. @@ -1093,6 +1105,37 @@ static void tb_exit_redrive(struct tb_port *port) } } +static void tb_switch_enter_redrive(struct tb_switch *sw) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) + tb_enter_redrive(port); +} + +/* + * Called during system and runtime suspend to forcefully exit redrive + * mode without querying whether the resource is available. 
+ */ +static void tb_switch_exit_redrive(struct tb_switch *sw) +{ + struct tb_port *port; + + if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE)) + return; + + tb_switch_for_each_port(sw, port) { + if (!tb_port_is_dpin(port)) + continue; + + if (port->redrive) { + port->redrive = false; + pm_runtime_put(&sw->dev); + tb_port_dbg(port, "exit redrive mode\n"); + } + } +} + static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) { struct tb_port *in, *out; @@ -1548,6 +1591,7 @@ static int tb_start(struct tb *tb) tb_create_usb3_tunnels(tb->root_switch); /* Add DP IN resources for the root switch */ tb_add_dp_resources(tb->root_switch); + tb_switch_enter_redrive(tb->root_switch); /* Make the discovered switches available to the userspace */ device_for_each_child(&tb->root_switch->dev, NULL, tb_scan_finalize_switch); @@ -1563,6 +1607,7 @@ static int tb_suspend_noirq(struct tb *tb) tb_dbg(tb, "suspending...\n"); tb_disconnect_and_release_dp(tb); + tb_switch_exit_redrive(tb->root_switch); tb_switch_suspend(tb->root_switch, false); tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ tb_dbg(tb, "suspend finished\n"); @@ -1665,6 +1710,7 @@ static int tb_resume_noirq(struct tb *tb) tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n"); msleep(100); } + tb_switch_enter_redrive(tb->root_switch); /* Allow tb_handle_hotplug to progress events */ tcm->hotplug_active = true; tb_dbg(tb, "resume finished\n"); @@ -1728,6 +1774,12 @@ static int tb_runtime_suspend(struct tb *tb) struct tb_cm *tcm = tb_priv(tb); mutex_lock(&tb->lock); + /* + * The below call only releases DP resources to allow exiting and + * re-entering redrive mode. + */ + tb_disconnect_and_release_dp(tb); + tb_switch_exit_redrive(tb->root_switch); tb_switch_suspend(tb->root_switch, true); tcm->hotplug_active = false; mutex_unlock(&tb->lock); @@ -1759,6 +1811,7 @@ static int tb_runtime_resume(struct tb *tb) tb_restore_children(tb->root_switch); list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) tb_tunnel_restart(tunnel); + tb_switch_enter_redrive(tb->root_switch); tcm->hotplug_active = true; mutex_unlock(&tb->lock); diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h index a1c9b0002550..983391f500f1 100644 --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h @@ -353,6 +353,7 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt) #ifdef CONFIG_SERIAL_8250_DMA extern int serial8250_tx_dma(struct uart_8250_port *); +extern void serial8250_tx_dma_flush(struct uart_8250_port *); extern int serial8250_rx_dma(struct uart_8250_port *); #if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI) extern int serial8250_start_rx_dma(struct uart_8250_port *); @@ -388,6 +389,7 @@ static inline int serial8250_tx_dma(struct uart_8250_port *p) { return -1; } +static inline void serial8250_tx_dma_flush(struct uart_8250_port *p) { } static inline int serial8250_rx_dma(struct uart_8250_port *p) { return -1; diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index c3816168ab78..739a42bcacf4 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -185,6 +185,22 @@ err: return ret; } +void serial8250_tx_dma_flush(struct uart_8250_port *p) +{ + struct uart_8250_dma *dma = p->dma; + + if (!dma->tx_running) + return; + + /* + * kfifo_reset() has been called by the serial core, avoid + * advancing and underflowing in __dma_tx_complete(). 
+ */ + dma->tx_size = 0; + + dmaengine_terminate_async(dma->txchan); +} + #if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI) int serial8250_rx_dma(struct uart_8250_port *p) diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 88025c347b3a..dd019fe86619 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -861,7 +861,7 @@ static const struct dw8250_platform_data dw8250_renesas_rzn1_data = { .quirks = DW_UART_QUIRK_IS_DMA_FC, }; -static const struct dw8250_platform_data dw8250_starfive_jh7100_data = { +static const struct dw8250_platform_data dw8250_skip_set_rate_data = { .usr_reg = DW_UART_USR, .quirks = DW_UART_QUIRK_SKIP_SET_RATE, }; @@ -872,7 +872,8 @@ static const struct of_device_id dw8250_of_match[] = { { .compatible = "cavium,octeon-3860-uart", .data = &dw8250_octeon_3860_data }, { .compatible = "marvell,armada-38x-uart", .data = &dw8250_armada_38x_data }, { .compatible = "renesas,rzn1-uart", .data = &dw8250_renesas_rzn1_data }, - { .compatible = "starfive,jh7100-uart", .data = &dw8250_starfive_jh7100_data }, + { .compatible = "sophgo,sg2044-uart", .data = &dw8250_skip_set_rate_data }, + { .compatible = "starfive,jh7100-uart", .data = &dw8250_skip_set_rate_data }, #endif { /* Sentinel */ } }; diff --git a/drivers/tty/serial/8250/8250_fintek.c b/drivers/tty/serial/8250/8250_fintek.c index e2aa2a1a02dd..ecbce226b874 100644 --- a/drivers/tty/serial/8250/8250_fintek.c +++ b/drivers/tty/serial/8250/8250_fintek.c @@ -21,6 +21,7 @@ #define CHIP_ID_F81866 0x1010 #define CHIP_ID_F81966 0x0215 #define CHIP_ID_F81216AD 0x1602 +#define CHIP_ID_F81216E 0x1617 #define CHIP_ID_F81216H 0x0501 #define CHIP_ID_F81216 0x0802 #define VENDOR_ID1 0x23 @@ -158,6 +159,7 @@ static int fintek_8250_check_id(struct fintek_8250 *pdata) case CHIP_ID_F81866: case CHIP_ID_F81966: case CHIP_ID_F81216AD: + case CHIP_ID_F81216E: case CHIP_ID_F81216H: case CHIP_ID_F81216: break; @@ -181,6 +183,7 @@ static int fintek_8250_get_ldn_range(struct fintek_8250 *pdata, int *min, return 0; case CHIP_ID_F81216AD: + case CHIP_ID_F81216E: case CHIP_ID_F81216H: case CHIP_ID_F81216: *min = F81216_LDN_LOW; @@ -250,6 +253,7 @@ static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level) break; case CHIP_ID_F81216AD: + case CHIP_ID_F81216E: case CHIP_ID_F81216H: case CHIP_ID_F81216: sio_write_mask_reg(pdata, FINTEK_IRQ_MODE, IRQ_SHARE, @@ -263,7 +267,8 @@ static void fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool is_level) static void fintek_8250_set_max_fifo(struct fintek_8250 *pdata) { switch (pdata->pid) { - case CHIP_ID_F81216H: /* 128Bytes FIFO */ + case CHIP_ID_F81216E: /* 128Bytes FIFO */ + case CHIP_ID_F81216H: case CHIP_ID_F81966: case CHIP_ID_F81866: sio_write_mask_reg(pdata, FIFO_CTRL, @@ -297,6 +302,7 @@ static void fintek_8250_set_termios(struct uart_port *port, goto exit; switch (pdata->pid) { + case CHIP_ID_F81216E: case CHIP_ID_F81216H: reg = RS485; break; @@ -346,6 +352,7 @@ static void fintek_8250_set_termios_handler(struct uart_8250_port *uart) struct fintek_8250 *pdata = uart->port.private_data; switch (pdata->pid) { + case CHIP_ID_F81216E: case CHIP_ID_F81216H: case CHIP_ID_F81966: case CHIP_ID_F81866: @@ -438,6 +445,11 @@ static void fintek_8250_set_rs485_handler(struct uart_8250_port *uart) uart->port.rs485_supported = fintek_8250_rs485_supported; break; + case CHIP_ID_F81216E: /* F81216E does not support RS485 delays */ + uart->port.rs485_config = fintek_8250_rs485_config; + uart->port.rs485_supported = 
fintek_8250_rs485_supported; + break; + default: /* No RS485 Auto direction functional */ break; } diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index a3eaf293f204..66fbc5ad8c72 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -749,12 +749,12 @@ static void omap_8250_shutdown(struct uart_port *port) struct uart_8250_port *up = up_to_u8250p(port); struct omap8250_priv *priv = port->private_data; + pm_runtime_get_sync(port->dev); + flush_work(&priv->qos_work); if (up->dma) omap_8250_rx_dma_flush(up); - pm_runtime_get_sync(port->dev); - serial_out(up, UART_OMAP_WER, 0); if (priv->habit & UART_HAS_EFR2) serial_out(up, UART_OMAP_EFR2, 0x0); diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 38fb7126ab0e..cef5b346341c 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -66,6 +66,8 @@ static const struct pci_device_id pci_use_msi[] = { 0xA000, 0x1000) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9922, 0xA000, 0x1000) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100, + 0xA000, 0x1000) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_HP_3PAR, PCI_DEVICE_ID_HPE_PCI_SERIAL, PCI_ANY_ID, PCI_ANY_ID) }, { } @@ -2594,6 +2596,22 @@ static struct pci_serial_quirk pci_serial_quirks[] = { .init = pci_oxsemi_tornado_init, .setup = pci_oxsemi_tornado_setup, }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4026, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, + { + .vendor = PCI_VENDOR_ID_INTASHIELD, + .device = 0x4021, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .init = pci_oxsemi_tornado_init, + .setup = pci_oxsemi_tornado_setup, + }, { .vendor = PCI_VENDOR_ID_INTEL, .device = 0x8811, @@ -5125,6 +5143,14 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0BA2, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0BA3, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_2_115200 }, /* * Brainboxes UC-235/246 */ @@ -5245,6 +5271,14 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b2_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C42, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0C43, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_b2_4_115200 }, /* * Brainboxes UC-420 */ @@ -5471,6 +5505,20 @@ static const struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_oxsemi_1_15625000 }, + /* + * Brainboxes XC-235 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4026, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_1_15625000 }, + /* + * Brainboxes XC-475 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x4021, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, + pbn_oxsemi_1_15625000 }, /* * Perle PCI-RAS cards @@ -5890,6 +5938,14 @@ static const struct pci_device_id serial_pci_tbl[] = { { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, 0xA000, 0x3004, 0, 0, pbn_b0_bt_4_115200 }, + + /* + * ASIX AX99100 PCIe to Multi I/O Controller + */ + { PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100, + 0xA000, 0x1000, + 0, 0, pbn_b0_1_115200 }, + /* Intel CE4100 */ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CE4100_UART, PCI_ANY_ID, PCI_ANY_ID, 0, 0, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 47d601e0e657..9602ecf6d2e8 100644 --- 
a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1711,7 +1711,7 @@ static void serial8250_disable_ms(struct uart_port *port) if (up->bugs & UART_BUG_NOMSR) return; - mctrl_gpio_disable_ms(up->gpios); + mctrl_gpio_disable_ms_no_sync(up->gpios); up->ier &= ~UART_IER_MSI; serial_port_out(port, UART_IER, up->ier); @@ -2154,7 +2154,8 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) serial8250_rpm_put(up); } -static void wait_for_lsr(struct uart_8250_port *up, int bits) +/* Returns true if @bits were set, false on timeout */ +static bool wait_for_lsr(struct uart_8250_port *up, int bits) { unsigned int status, tmout = 10000; @@ -2169,11 +2170,11 @@ static void wait_for_lsr(struct uart_8250_port *up, int bits) udelay(1); touch_nmi_watchdog(); } + + return (tmout != 0); } -/* - * Wait for transmitter & holding register to empty - */ +/* Wait for transmitter and holding register to empty with timeout */ static void wait_for_xmitr(struct uart_8250_port *up, int bits) { unsigned int tmout; @@ -2616,6 +2617,14 @@ static unsigned int npcm_get_divisor(struct uart_8250_port *up, return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2; } +static void serial8250_flush_buffer(struct uart_port *port) +{ + struct uart_8250_port *up = up_to_u8250p(port); + + if (up->dma) + serial8250_tx_dma_flush(up); +} + static unsigned int serial8250_do_get_divisor(struct uart_port *port, unsigned int baud, unsigned int *frac) @@ -3376,6 +3385,7 @@ static const struct uart_ops serial8250_pops = { .break_ctl = serial8250_break_ctl, .startup = serial8250_startup, .shutdown = serial8250_shutdown, + .flush_buffer = serial8250_flush_buffer, .set_termios = serial8250_set_termios, .set_ldisc = serial8250_set_ldisc, .pm = serial8250_pm, @@ -3466,6 +3476,16 @@ static void serial8250_console_restore(struct uart_8250_port *up) serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); } +static void fifo_wait_for_lsr(struct uart_8250_port *up, unsigned int count) +{ + unsigned int i; + + for (i = 0; i < count; i++) { + if (wait_for_lsr(up, UART_LSR_THRE)) + return; + } +} + /* * Print a string to the serial port using the device FIFO * @@ -3475,13 +3495,15 @@ static void serial8250_console_restore(struct uart_8250_port *up) static void serial8250_console_fifo_write(struct uart_8250_port *up, const char *s, unsigned int count) { - int i; const char *end = s + count; unsigned int fifosize = up->tx_loadsz; + unsigned int tx_count = 0; bool cr_sent = false; + unsigned int i; while (s != end) { - wait_for_lsr(up, UART_LSR_THRE); + /* Allow timeout for each byte of a possibly full FIFO */ + fifo_wait_for_lsr(up, fifosize); for (i = 0; i < fifosize && s != end; ++i) { if (*s == '\n' && !cr_sent) { @@ -3492,7 +3514,14 @@ static void serial8250_console_fifo_write(struct uart_8250_port *up, cr_sent = false; } } + tx_count = i; } + + /* + * Allow timeout for each byte written since the caller will only wait + * for UART_LSR_BOTH_EMPTY using the timeout of a single character + */ + fifo_wait_for_lsr(up, tx_count); } /* diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 2f0f05259778..5e151e1a6f08 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -346,9 +346,9 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap) flag = TTY_FRAME; } - spin_unlock(&uap->port.lock); + uart_port_unlock(&uap->port); sysrq = uart_handle_sysrq_char(&uap->port, ch & 255); - spin_lock(&uap->port.lock); + 
uart_port_lock(&uap->port); if (!sysrq) uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); @@ -543,7 +543,7 @@ static void pl011_dma_tx_callback(void *data) unsigned long flags; u16 dmacr; - spin_lock_irqsave(&uap->port.lock, flags); + uart_port_lock_irqsave(&uap->port, &flags); if (uap->dmatx.queued) dma_unmap_single(dmatx->chan->device->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE); @@ -564,7 +564,7 @@ static void pl011_dma_tx_callback(void *data) if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) || uart_circ_empty(&uap->port.state->xmit)) { uap->dmatx.queued = false; - spin_unlock_irqrestore(&uap->port.lock, flags); + uart_port_unlock_irqrestore(&uap->port, flags); return; } @@ -575,7 +575,7 @@ static void pl011_dma_tx_callback(void *data) */ pl011_start_tx_pio(uap); - spin_unlock_irqrestore(&uap->port.lock, flags); + uart_port_unlock_irqrestore(&uap->port, flags); } /* @@ -1004,7 +1004,7 @@ static void pl011_dma_rx_callback(void *data) * routine to flush out the secondary DMA buffer while * we immediately trigger the next DMA job. */ - spin_lock_irq(&uap->port.lock); + uart_port_lock_irq(&uap->port); /* * Rx data can be taken by the UART interrupts during * the DMA irq handler. So we check the residue here. @@ -1020,7 +1020,7 @@ static void pl011_dma_rx_callback(void *data) ret = pl011_dma_rx_trigger_dma(uap); pl011_dma_rx_chars(uap, pending, lastbuf, false); - spin_unlock_irq(&uap->port.lock); + uart_port_unlock_irq(&uap->port); /* * Do this check after we picked the DMA chars so we don't * get some IRQ immediately from RX. @@ -1086,11 +1086,11 @@ static void pl011_dma_rx_poll(struct timer_list *t) if (jiffies_to_msecs(jiffies - dmarx->last_jiffies) > uap->dmarx.poll_timeout) { - spin_lock_irqsave(&uap->port.lock, flags); + uart_port_lock_irqsave(&uap->port, &flags); pl011_dma_rx_stop(uap); uap->im |= UART011_RXIM; pl011_write(uap->im, uap, REG_IMSC); - spin_unlock_irqrestore(&uap->port.lock, flags); + uart_port_unlock_irqrestore(&uap->port, flags); uap->dmarx.running = false; dmaengine_terminate_all(rxchan); @@ -1186,10 +1186,10 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap) while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy) cpu_relax(); - spin_lock_irq(&uap->port.lock); + uart_port_lock_irq(&uap->port); uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE); pl011_write(uap->dmacr, uap, REG_DMACR); - spin_unlock_irq(&uap->port.lock); + uart_port_unlock_irq(&uap->port); if (uap->using_tx_dma) { /* In theory, this should already be done by pl011_dma_flush_buffer */ @@ -1400,9 +1400,9 @@ static void pl011_throttle_rx(struct uart_port *port) { unsigned long flags; - spin_lock_irqsave(&port->lock, flags); + uart_port_lock_irqsave(port, &flags); pl011_stop_rx(port); - spin_unlock_irqrestore(&port->lock, flags); + uart_port_unlock_irqrestore(port, flags); } static void pl011_enable_ms(struct uart_port *port) @@ -1420,7 +1420,7 @@ __acquires(&uap->port.lock) { pl011_fifo_to_tty(uap); - spin_unlock(&uap->port.lock); + uart_port_unlock(&uap->port); tty_flip_buffer_push(&uap->port.state->port); /* * If we were temporarily out of DMA mode for a while, @@ -1445,7 +1445,7 @@ __acquires(&uap->port.lock) #endif } } - spin_lock(&uap->port.lock); + uart_port_lock(&uap->port); } static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c, @@ -1551,7 +1551,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id) unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT; int handled = 0; - spin_lock_irqsave(&uap->port.lock, flags); + 
uart_port_lock_irqsave(&uap->port, &flags); status = pl011_read(uap, REG_RIS) & uap->im; if (status) { do { @@ -1581,7 +1581,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id) handled = 1; } - spin_unlock_irqrestore(&uap->port.lock, flags); + uart_port_unlock_irqrestore(&uap->port, flags); return IRQ_RETVAL(handled); } @@ -1653,14 +1653,14 @@ static void pl011_break_ctl(struct uart_port *port, int break_state) unsigned long flags; unsigned int lcr_h; - spin_lock_irqsave(&uap->port.lock, flags); + uart_port_lock_irqsave(&uap->port, &flags); lcr_h = pl011_read(uap, REG_LCRH_TX); if (break_state == -1) lcr_h |= UART01x_LCRH_BRK; else lcr_h &= ~UART01x_LCRH_BRK; pl011_write(lcr_h, uap, REG_LCRH_TX); - spin_unlock_irqrestore(&uap->port.lock, flags); + uart_port_unlock_irqrestore(&uap->port, flags); } #ifdef CONFIG_CONSOLE_POLL @@ -1799,7 +1799,7 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap) unsigned long flags; unsigned int i; - spin_lock_irqsave(&uap->port.lock, flags); + uart_port_lock_irqsave(&uap->port, &flags); /* Clear out any spuriously appearing RX interrupts */ pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR); @@ -1821,7 +1821,7 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap) if (!pl011_dma_rx_running(uap)) uap->im |= UART011_RXIM; pl011_write(uap->im, uap, REG_IMSC); - spin_unlock_irqrestore(&uap->port.lock, flags); + uart_port_unlock_irqrestore(&uap->port, flags); } static void pl011_unthrottle_rx(struct uart_port *port) @@ -1829,7 +1829,7 @@ static void pl011_unthrottle_rx(struct uart_port *port) struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); unsigned long flags; - spin_lock_irqsave(&uap->port.lock, flags); + uart_port_lock_irqsave(&uap->port, &flags); uap->im = UART011_RTIM; if (!pl011_dma_rx_running(uap)) @@ -1837,7 +1837,14 @@ static void pl011_unthrottle_rx(struct uart_port *port) pl011_write(uap->im, uap, REG_IMSC); - spin_unlock_irqrestore(&uap->port.lock, flags); +#ifdef CONFIG_DMA_ENGINE + if (uap->using_rx_dma) { + uap->dmacr |= UART011_RXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); + } +#endif + + uart_port_unlock_irqrestore(&uap->port, flags); } static int pl011_startup(struct uart_port *port) @@ -1857,7 +1864,7 @@ static int pl011_startup(struct uart_port *port) pl011_write(uap->vendor->ifls, uap, REG_IFLS); - spin_lock_irq(&uap->port.lock); + uart_port_lock_irq(&uap->port); cr = pl011_read(uap, REG_CR); cr &= UART011_CR_RTS | UART011_CR_DTR; @@ -1868,7 +1875,7 @@ static int pl011_startup(struct uart_port *port) pl011_write(cr, uap, REG_CR); - spin_unlock_irq(&uap->port.lock); + uart_port_unlock_irq(&uap->port); /* * initialise the old status of the modem signals @@ -1929,12 +1936,12 @@ static void pl011_disable_uart(struct uart_amba_port *uap) unsigned int cr; uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); - spin_lock_irq(&uap->port.lock); + uart_port_lock_irq(&uap->port); cr = pl011_read(uap, REG_CR); cr &= UART011_CR_RTS | UART011_CR_DTR; cr |= UART01x_CR_UARTEN | UART011_CR_TXE; pl011_write(cr, uap, REG_CR); - spin_unlock_irq(&uap->port.lock); + uart_port_unlock_irq(&uap->port); /* * disable break condition and fifos @@ -1946,14 +1953,14 @@ static void pl011_disable_uart(struct uart_amba_port *uap) static void pl011_disable_interrupts(struct uart_amba_port *uap) { - spin_lock_irq(&uap->port.lock); + uart_port_lock_irq(&uap->port); /* mask all interrupts and clear all pending ones */ uap->im = 0; pl011_write(uap->im, uap, REG_IMSC); pl011_write(0xffff, uap, REG_ICR); - 
spin_unlock_irq(&uap->port.lock); + uart_port_unlock_irq(&uap->port); } static void pl011_shutdown(struct uart_port *port) @@ -2098,7 +2105,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios, bits = tty_get_frame_size(termios->c_cflag); - spin_lock_irqsave(&port->lock, flags); + uart_port_lock_irqsave(port, &flags); /* * Update the per-port timeout. @@ -2165,7 +2172,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios, pl011_write_lcr_h(uap, lcr_h); pl011_write(old_cr, uap, REG_CR); - spin_unlock_irqrestore(&port->lock, flags); + uart_port_unlock_irqrestore(port, flags); } static void @@ -2183,10 +2190,10 @@ sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios, termios->c_cflag &= ~(CMSPAR | CRTSCTS); termios->c_cflag |= CS8 | CLOCAL; - spin_lock_irqsave(&port->lock, flags); + uart_port_lock_irqsave(port, &flags); uart_update_timeout(port, CS8, uap->fixed_baud); pl011_setup_status_masks(port, termios); - spin_unlock_irqrestore(&port->lock, flags); + uart_port_unlock_irqrestore(port, flags); } static const char *pl011_type(struct uart_port *port) @@ -2325,9 +2332,9 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) if (uap->port.sysrq) locked = 0; else if (oops_in_progress) - locked = spin_trylock(&uap->port.lock); + locked = uart_port_trylock(&uap->port); else - spin_lock(&uap->port.lock); + uart_port_lock(&uap->port); /* * First save the CR then disable the interrupts @@ -2353,7 +2360,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) pl011_write(old_cr, uap, REG_CR); if (locked) - spin_unlock(&uap->port.lock); + uart_port_unlock(&uap->port); local_irq_restore(flags); clk_disable(uap->clk); diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 6a9310379dc2..b3463cdd1d4b 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -692,7 +692,7 @@ static void atmel_disable_ms(struct uart_port *port) atmel_port->ms_irq_enabled = false; - mctrl_gpio_disable_ms(atmel_port->gpios); + mctrl_gpio_disable_ms_no_sync(atmel_port->gpios); if (!mctrl_gpio_to_gpiod(atmel_port->gpios, UART_GPIO_CTS)) idr |= ATMEL_US_CTSIC; diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index f94c78263868..011f17593613 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1394,12 +1394,25 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio struct lpuart_port, port); unsigned long modem = lpuart32_read(&sport->port, UARTMODIR) - & ~(UARTMODEM_TXRTSPOL | UARTMODEM_TXRTSE); + & ~(UARTMODIR_TXRTSPOL | UARTMODIR_TXRTSE); + u32 ctrl; + + /* TXRTSE and TXRTSPOL only can be changed when transmitter is disabled. */ + ctrl = lpuart32_read(&sport->port, UARTCTRL); + if (ctrl & UARTCTRL_TE) { + /* wait for the transmit engine to complete */ + lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC); + lpuart32_write(&sport->port, ctrl & ~UARTCTRL_TE, UARTCTRL); + + while (lpuart32_read(&sport->port, UARTCTRL) & UARTCTRL_TE) + cpu_relax(); + } + lpuart32_write(&sport->port, modem, UARTMODIR); if (rs485->flags & SER_RS485_ENABLED) { /* Enable auto RS-485 RTS mode */ - modem |= UARTMODEM_TXRTSE; + modem |= UARTMODIR_TXRTSE; /* * The hardware defaults to RTS logic HIGH while transfer. @@ -1408,12 +1421,16 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio * Note: UART is assumed to be active high. 
*/ if (rs485->flags & SER_RS485_RTS_ON_SEND) - modem |= UARTMODEM_TXRTSPOL; + modem |= UARTMODIR_TXRTSPOL; else if (rs485->flags & SER_RS485_RTS_AFTER_SEND) - modem &= ~UARTMODEM_TXRTSPOL; + modem &= ~UARTMODIR_TXRTSPOL; } lpuart32_write(&sport->port, modem, UARTMODIR); + + if (ctrl & UARTCTRL_TE) + lpuart32_write(&sport->port, ctrl, UARTCTRL); + return 0; } diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 94e0781e00e8..fe22ca009fb3 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1586,7 +1586,7 @@ static void imx_uart_shutdown(struct uart_port *port) imx_uart_dma_exit(sport); } - mctrl_gpio_disable_ms(sport->gpios); + mctrl_gpio_disable_ms_sync(sport->gpios); spin_lock_irqsave(&sport->port.lock, flags); ucr2 = imx_uart_readl(sport, UCR2); diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 7dd19a281579..fd50342526d1 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -1745,6 +1745,12 @@ msm_serial_early_console_setup_dm(struct earlycon_device *device, if (!device->port.membase) return -ENODEV; + /* Disable DM / single-character modes */ + msm_write(&device->port, 0, UARTDM_DMEN); + msm_write(&device->port, MSM_UART_CR_CMD_RESET_RX, MSM_UART_CR); + msm_write(&device->port, MSM_UART_CR_CMD_RESET_TX, MSM_UART_CR); + msm_write(&device->port, MSM_UART_CR_TX_ENABLE, MSM_UART_CR); + device->con->write = msm_serial_early_write_dm; return 0; } diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index a723df9b37dd..c07baf5d5a9c 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -545,6 +545,9 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) SC16IS7XX_MCR_CLKSEL_BIT, prescaler == 1 ? 
0 : SC16IS7XX_MCR_CLKSEL_BIT); + + mutex_lock(&one->efr_lock); + /* Open the LCR divisors for configuration */ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, SC16IS7XX_LCR_CONF_MODE_A); @@ -558,6 +561,8 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) /* Put LCR back to the normal mode */ sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr); + mutex_unlock(&one->efr_lock); + return DIV_ROUND_CLOSEST((clk / prescaler) / 16, div); } diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c index 7d5aaa8d422b..d5fb293dd5a9 100644 --- a/drivers/tty/serial/serial_mctrl_gpio.c +++ b/drivers/tty/serial/serial_mctrl_gpio.c @@ -322,11 +322,7 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios) } EXPORT_SYMBOL_GPL(mctrl_gpio_enable_ms); -/** - * mctrl_gpio_disable_ms - disable irqs and handling of changes to the ms lines - * @gpios: gpios to disable - */ -void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) +static void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios, bool sync) { enum mctrl_gpio_idx i; @@ -342,10 +338,34 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) if (!gpios->irq[i]) continue; - disable_irq(gpios->irq[i]); + if (sync) + disable_irq(gpios->irq[i]); + else + disable_irq_nosync(gpios->irq[i]); } } -EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms); + +/** + * mctrl_gpio_disable_ms_sync - disable irqs and handling of changes to the ms + * lines, and wait for any pending IRQ to be processed + * @gpios: gpios to disable + */ +void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios) +{ + mctrl_gpio_disable_ms(gpios, true); +} +EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_sync); + +/** + * mctrl_gpio_disable_ms_no_sync - disable irqs and handling of changes to the + * ms lines, and return immediately + * @gpios: gpios to disable + */ +void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios) +{ + mctrl_gpio_disable_ms(gpios, false); +} +EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms_no_sync); void mctrl_gpio_enable_irq_wake(struct mctrl_gpios *gpios) { diff --git a/drivers/tty/serial/serial_mctrl_gpio.h b/drivers/tty/serial/serial_mctrl_gpio.h index fc76910fb105..79e97838ebe5 100644 --- a/drivers/tty/serial/serial_mctrl_gpio.h +++ b/drivers/tty/serial/serial_mctrl_gpio.h @@ -87,9 +87,16 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios); void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios); /* - * Disable gpio interrupts to report status line changes. + * Disable gpio interrupts to report status line changes, and block until + * any corresponding IRQ is processed */ -void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios); +void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios); + +/* + * Disable gpio interrupts to report status line changes, and return + * immediately + */ +void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios); /* * Enable gpio wakeup interrupts to enable wake up source. 
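/*
 * Illustrative sketch, not part of the patch above: one way a serial
 * driver could choose between the two helpers introduced by this hunk.
 * The "foo_uart" names and callbacks are hypothetical; only
 * mctrl_gpio_enable_ms(), mctrl_gpio_disable_ms_sync() and
 * mctrl_gpio_disable_ms_no_sync() come from this file.  Judging by the
 * callers converted elsewhere in this series, paths that may run in
 * atomic context or under the port lock (where waiting for a running
 * modem-status IRQ handler could deadlock) use the _no_sync() variant,
 * while process-context teardown such as ->shutdown() uses the _sync()
 * variant so the handler is guaranteed to have finished.
 */
#include <linux/serial_core.h>
#include "serial_mctrl_gpio.h"

struct foo_uart_port {			/* hypothetical driver state */
	struct uart_port port;
	struct mctrl_gpios *gpios;
};

static void foo_uart_enable_ms(struct uart_port *port)
{
	struct foo_uart_port *fp = container_of(port, struct foo_uart_port, port);

	mctrl_gpio_enable_ms(fp->gpios);
}

static void foo_uart_disable_ms(struct uart_port *port)
{
	struct foo_uart_port *fp = container_of(port, struct foo_uart_port, port);

	/* Assumed to run under port->lock: do not wait for the IRQ handler. */
	mctrl_gpio_disable_ms_no_sync(fp->gpios);
}

static void foo_uart_shutdown(struct uart_port *port)
{
	struct foo_uart_port *fp = container_of(port, struct foo_uart_port, port);

	/* Process context, lock not held: safe to wait for pending handlers. */
	mctrl_gpio_disable_ms_sync(fp->gpios);
}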
@@ -148,7 +155,11 @@ static inline void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios) { } -static inline void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios) +static inline void mctrl_gpio_disable_ms_sync(struct mctrl_gpios *gpios) +{ +} + +static inline void mctrl_gpio_disable_ms_no_sync(struct mctrl_gpios *gpios) { } diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 08ad5ae41121..ed468b676f0b 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -105,6 +105,20 @@ struct plat_sci_reg { u8 offset, size; }; +struct sci_suspend_regs { + u16 scdl; + u16 sccks; + u16 scsmr; + u16 scscr; + u16 scfcr; + u16 scsptr; + u16 hssrr; + u16 scpcr; + u16 scpdr; + u8 scbrr; + u8 semr; +}; + struct sci_port_params { const struct plat_sci_reg regs[SCIx_NR_REGS]; unsigned int fifosize; @@ -135,6 +149,8 @@ struct sci_port { struct dma_chan *chan_tx; struct dma_chan *chan_rx; + struct reset_control *rstc; + #ifdef CONFIG_SERIAL_SH_SCI_DMA struct dma_chan *chan_tx_saved; struct dma_chan *chan_rx_saved; @@ -154,6 +170,7 @@ struct sci_port { int rx_trigger; struct timer_list rx_fifo_timer; int rx_fifo_timeout; + struct sci_suspend_regs suspend_regs; u16 hscif_tot; bool has_rtscts; @@ -165,6 +182,7 @@ struct sci_port { static struct sci_port sci_ports[SCI_NPORTS]; static unsigned long sci_ports_in_use; static struct uart_driver sci_uart_driver; +static bool sci_uart_earlycon; static inline struct sci_port * to_sci_port(struct uart_port *uart) @@ -2181,7 +2199,7 @@ static void sci_shutdown(struct uart_port *port) dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); s->autorts = false; - mctrl_gpio_disable_ms(to_sci_port(port)->gpios); + mctrl_gpio_disable_ms_sync(to_sci_port(port)->gpios); spin_lock_irqsave(&port->lock, flags); sci_stop_rx(port); @@ -3251,6 +3269,7 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev, } sp = &sci_ports[id]; + sp->rstc = rstc; *dev_id = id; p->type = SCI_OF_TYPE(data); @@ -3318,6 +3337,7 @@ static int sci_probe_single(struct platform_device *dev, static int sci_probe(struct platform_device *dev) { struct plat_sci_port *p; + struct resource *res; struct sci_port *sp; unsigned int dev_id; int ret; @@ -3347,6 +3367,26 @@ static int sci_probe(struct platform_device *dev) } sp = &sci_ports[dev_id]; + + /* + * In case: + * - the probed port alias is zero (as the one used by earlycon), and + * - the earlycon is still active (e.g., "earlycon keep_bootcon" in + * bootargs) + * + * defer the probe of this serial. This is a debug scenario and the user + * must be aware of it. + * + * Except when the probed port is the same as the earlycon port. 
+ */ + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + if (sci_uart_earlycon && sp == &sci_ports[0] && sp->port.mapbase != res->start) + return dev_err_probe(&dev->dev, -EBUSY, "sci_port[0] is used by earlycon!\n"); + platform_set_drvdata(dev, sp); ret = sci_probe_single(dev, dev_id, p, sp); @@ -3378,13 +3418,77 @@ static int sci_probe(struct platform_device *dev) return 0; } +static void sci_console_save(struct sci_port *s) +{ + struct sci_suspend_regs *regs = &s->suspend_regs; + struct uart_port *port = &s->port; + + if (sci_getreg(port, SCDL)->size) + regs->scdl = sci_serial_in(port, SCDL); + if (sci_getreg(port, SCCKS)->size) + regs->sccks = sci_serial_in(port, SCCKS); + if (sci_getreg(port, SCSMR)->size) + regs->scsmr = sci_serial_in(port, SCSMR); + if (sci_getreg(port, SCSCR)->size) + regs->scscr = sci_serial_in(port, SCSCR); + if (sci_getreg(port, SCFCR)->size) + regs->scfcr = sci_serial_in(port, SCFCR); + if (sci_getreg(port, SCSPTR)->size) + regs->scsptr = sci_serial_in(port, SCSPTR); + if (sci_getreg(port, SCBRR)->size) + regs->scbrr = sci_serial_in(port, SCBRR); + if (sci_getreg(port, HSSRR)->size) + regs->hssrr = sci_serial_in(port, HSSRR); + if (sci_getreg(port, SCPCR)->size) + regs->scpcr = sci_serial_in(port, SCPCR); + if (sci_getreg(port, SCPDR)->size) + regs->scpdr = sci_serial_in(port, SCPDR); + if (sci_getreg(port, SEMR)->size) + regs->semr = sci_serial_in(port, SEMR); +} + +static void sci_console_restore(struct sci_port *s) +{ + struct sci_suspend_regs *regs = &s->suspend_regs; + struct uart_port *port = &s->port; + + if (sci_getreg(port, SCDL)->size) + sci_serial_out(port, SCDL, regs->scdl); + if (sci_getreg(port, SCCKS)->size) + sci_serial_out(port, SCCKS, regs->sccks); + if (sci_getreg(port, SCSMR)->size) + sci_serial_out(port, SCSMR, regs->scsmr); + if (sci_getreg(port, SCSCR)->size) + sci_serial_out(port, SCSCR, regs->scscr); + if (sci_getreg(port, SCFCR)->size) + sci_serial_out(port, SCFCR, regs->scfcr); + if (sci_getreg(port, SCSPTR)->size) + sci_serial_out(port, SCSPTR, regs->scsptr); + if (sci_getreg(port, SCBRR)->size) + sci_serial_out(port, SCBRR, regs->scbrr); + if (sci_getreg(port, HSSRR)->size) + sci_serial_out(port, HSSRR, regs->hssrr); + if (sci_getreg(port, SCPCR)->size) + sci_serial_out(port, SCPCR, regs->scpcr); + if (sci_getreg(port, SCPDR)->size) + sci_serial_out(port, SCPDR, regs->scpdr); + if (sci_getreg(port, SEMR)->size) + sci_serial_out(port, SEMR, regs->semr); +} + static __maybe_unused int sci_suspend(struct device *dev) { struct sci_port *sport = dev_get_drvdata(dev); - if (sport) + if (sport) { uart_suspend_port(&sci_uart_driver, &sport->port); + if (!console_suspend_enabled && uart_console(&sport->port)) + sci_console_save(sport); + else + return reset_control_assert(sport->rstc); + } + return 0; } @@ -3392,8 +3496,18 @@ static __maybe_unused int sci_resume(struct device *dev) { struct sci_port *sport = dev_get_drvdata(dev); - if (sport) + if (sport) { + if (!console_suspend_enabled && uart_console(&sport->port)) { + sci_console_restore(sport); + } else { + int ret = reset_control_deassert(sport->rstc); + + if (ret) + return ret; + } + uart_resume_port(&sci_uart_driver, &sport->port); + } return 0; } @@ -3430,7 +3544,7 @@ sh_early_platform_init_buffer("earlyprintk", &sci_driver, early_serial_buf, ARRAY_SIZE(early_serial_buf)); #endif #ifdef CONFIG_SERIAL_SH_SCI_EARLYCON -static struct plat_sci_port port_cfg __initdata; +static struct plat_sci_port port_cfg; static int __init 
early_console_setup(struct earlycon_device *device, int type) @@ -3445,6 +3559,7 @@ static int __init early_console_setup(struct earlycon_device *device, port_cfg.type = type; sci_ports[0].cfg = &port_cfg; sci_ports[0].params = sci_probe_regmap(&port_cfg); + sci_uart_earlycon = true; port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR); sci_serial_out(&sci_ports[0].port, SCSCR, SCSCR_RE | SCSCR_TE | port_cfg.scscr); diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c index 2affd1351ae7..4d7825c4d769 100644 --- a/drivers/tty/serial/sifive.c +++ b/drivers/tty/serial/sifive.c @@ -583,8 +583,11 @@ static void sifive_serial_break_ctl(struct uart_port *port, int break_state) static int sifive_serial_startup(struct uart_port *port) { struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + unsigned long flags; + uart_port_lock_irqsave(&ssp->port, &flags); __ssp_enable_rxwm(ssp); + uart_port_unlock_irqrestore(&ssp->port, flags); return 0; } @@ -592,9 +595,12 @@ static int sifive_serial_startup(struct uart_port *port) static void sifive_serial_shutdown(struct uart_port *port) { struct sifive_serial_port *ssp = port_to_sifive_serial_port(port); + unsigned long flags; + uart_port_lock_irqsave(&ssp->port, &flags); __ssp_disable_rxwm(ssp); __ssp_disable_txwm(ssp); + uart_port_unlock_irqrestore(&ssp->port, flags); } /** diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 7d11511c8c12..8670bb5042c4 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -850,7 +850,7 @@ static void stm32_usart_enable_ms(struct uart_port *port) static void stm32_usart_disable_ms(struct uart_port *port) { - mctrl_gpio_disable_ms(to_stm32_port(port)->gpios); + mctrl_gpio_disable_ms_sync(to_stm32_port(port)->gpios); } /* Transmit stop */ diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 2eff7cff57c4..29afcc6d9bb7 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -268,7 +268,7 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus) continue; } - if (uart_handle_sysrq_char(port, data)) + if (uart_prepare_sysrq_char(port, data)) continue; if (is_rxbs_support) { @@ -371,7 +371,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id) !(readl(port->membase + CDNS_UART_CR) & CDNS_UART_CR_RX_DIS)) cdns_uart_handle_rx(dev_id, isrstatus); - spin_unlock(&port->lock); + uart_unlock_and_check_sysrq(port); return IRQ_HANDLED; } @@ -1231,10 +1231,8 @@ static void cdns_uart_console_write(struct console *co, const char *s, unsigned int imr, ctrl; int locked = 1; - if (port->sysrq) - locked = 0; - else if (oops_in_progress) - locked = spin_trylock_irqsave(&port->lock, flags); + if (oops_in_progress) + locked = uart_port_trylock_irqsave(port, &flags); else spin_lock_irqsave(&port->lock, flags); diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 7ca7731fa78a..2238d4c19f54 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -830,7 +830,7 @@ static struct ctl_table tty_table[] = { .data = &tty_ldisc_autoload, .maxlen = sizeof(tty_ldisc_autoload), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index 53aea56d1de1..17e38979b822 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -452,6 +452,9 @@ static ssize_t 
read_req_latency_avg_show(struct device *dev, struct ufs_hba *hba = dev_get_drvdata(dev); struct ufs_hba_monitor *m = &hba->monitor; + if (!m->nr_req[READ]) + return sysfs_emit(buf, "0\n"); + return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]), m->nr_req[READ])); } @@ -519,6 +522,9 @@ static ssize_t write_req_latency_avg_show(struct device *dev, struct ufs_hba *hba = dev_get_drvdata(dev); struct ufs_hba_monitor *m = &hba->monitor; + if (!m->nr_req[WRITE]) + return sysfs_emit(buf, "0\n"); + return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]), m->nr_req[WRITE])); } diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c index b99e3f3dc4ef..ead55e063d2b 100644 --- a/drivers/ufs/core/ufs_bsg.c +++ b/drivers/ufs/core/ufs_bsg.c @@ -181,6 +181,7 @@ void ufs_bsg_remove(struct ufs_hba *hba) return; bsg_remove_queue(hba->bsg_queue); + hba->bsg_queue = NULL; device_del(bsg_dev); put_device(bsg_dev); @@ -219,6 +220,7 @@ int ufs_bsg_probe(struct ufs_hba *hba) q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0); if (IS_ERR(q)) { ret = PTR_ERR(q); + device_del(bsg_dev); goto out; } diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index a1809aa9a889..c5115f6adbdc 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -2230,8 +2230,6 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) int err; hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES); - if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) - hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT; /* nutrs and nutmrs are 0 based values */ hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1; @@ -4449,9 +4447,6 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba, dev_err(hba->dev, "%s: power mode change failed %d\n", __func__, ret); } else { - ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, - pwr_mode); - memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr)); } @@ -4478,6 +4473,10 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba, ret = ufshcd_change_power_mode(hba, &final_params); + if (!ret) + ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL, + &final_params); + return ret; } EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode); @@ -9648,6 +9647,8 @@ EXPORT_SYMBOL_GPL(ufshcd_dealloc_host); */ static int ufshcd_set_dma_mask(struct ufs_hba *hba) { + if (hba->vops && hba->vops->set_dma_mask) + return hba->vops->set_dma_mask(hba); if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) { if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64))) return 0; @@ -9878,14 +9879,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) } /* - * Set the default power management level for runtime and system PM. + * Set the default power management level for runtime and system PM if + * not set by the host controller drivers. * Default power saving mode is to keep UFS link in Hibern8 state * and UFS device in sleep state. 
*/ - hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + if (!hba->rpm_lvl) + hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE); - hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( + if (!hba->spm_lvl) + hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state( UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE); diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c index 3cdac89a28b8..e981f1f8805f 100644 --- a/drivers/ufs/host/ufs-exynos.c +++ b/drivers/ufs/host/ufs-exynos.c @@ -901,6 +901,12 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs) } phy_set_bus_width(generic_phy, ufs->avail_ln_rx); + + if (generic_phy->power_count) { + phy_power_off(generic_phy); + phy_exit(generic_phy); + } + ret = phy_init(generic_phy); if (ret) { dev_err(hba->dev, "%s: phy init failed, ret = %d\n", @@ -984,9 +990,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba) exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4); exynos_ufs_set_unipro_pclk_div(ufs); + exynos_ufs_setup_clocks(hba, true, PRE_CHANGE); + /* unipro */ exynos_ufs_config_unipro(ufs); + if (ufs->drv_data->pre_link) + ufs->drv_data->pre_link(ufs); + /* m-phy */ exynos_ufs_phy_init(ufs); if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) { @@ -994,11 +1005,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba) exynos_ufs_config_phy_cap_attr(ufs); } - exynos_ufs_setup_clocks(hba, true, PRE_CHANGE); - - if (ufs->drv_data->pre_link) - ufs->drv_data->pre_link(ufs); - return 0; } @@ -1228,12 +1234,12 @@ static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba) hci_writel(ufs, 1 << 0, HCI_GPIO_OUT); } -static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) +static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr; - if (!enter) { + if (cmd == UIC_CMD_DME_HIBER_EXIT) { if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) exynos_ufs_disable_auto_ctrl_hcc(ufs); exynos_ufs_ungate_clks(ufs); @@ -1261,11 +1267,11 @@ static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter) } } -static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter) +static void exynos_ufs_post_hibern8(struct ufs_hba *hba, enum uic_cmd_dme cmd) { struct exynos_ufs *ufs = ufshcd_get_variant(hba); - if (!enter) { + if (cmd == UIC_CMD_DME_HIBER_EXIT) { u32 cur_mode = 0; u32 pwrmode; @@ -1284,7 +1290,7 @@ static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter) if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)) exynos_ufs_establish_connt(ufs); - } else { + } else if (cmd == UIC_CMD_DME_HIBER_ENTER) { ufs->entry_hibern8_t = ktime_get(); exynos_ufs_gate_clks(ufs); if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL) @@ -1363,15 +1369,15 @@ static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba, } static void exynos_ufs_hibern8_notify(struct ufs_hba *hba, - enum uic_cmd_dme enter, + enum uic_cmd_dme cmd, enum ufs_notify_change_status notify) { switch ((u8)notify) { case PRE_CHANGE: - exynos_ufs_pre_hibern8(hba, enter); + exynos_ufs_pre_hibern8(hba, cmd); break; case POST_CHANGE: - exynos_ufs_post_hibern8(hba, enter); + exynos_ufs_post_hibern8(hba, cmd); break; } } diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index ab0652d8705a..481ad0a3a6c7 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -7,6 +7,7 @@ #include #include +#include #include #include 
#include @@ -364,14 +365,20 @@ static int ufs_renesas_init(struct ufs_hba *hba) return -ENOMEM; ufshcd_set_variant(hba, priv); - hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO; + hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO; return 0; } +static int ufs_renesas_set_dma_mask(struct ufs_hba *hba) +{ + return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); +} + static const struct ufs_hba_variant_ops ufs_renesas_vops = { .name = "renesas", .init = ufs_renesas_init, + .set_dma_mask = ufs_renesas_set_dma_mask, .setup_clocks = ufs_renesas_setup_clocks, .hce_enable_notify = ufs_renesas_hce_enable_notify, .dbg_register_dump = ufs_renesas_dbg_register_dump, diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 8f3b9a0a38e1..1443e9cf631a 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -1131,7 +1131,10 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, struct cxacru_data *instance; struct usb_device *usb_dev = interface_to_usbdev(intf); struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD]; - struct usb_endpoint_descriptor *in, *out; + static const u8 ep_addrs[] = { + CXACRU_EP_CMD + USB_DIR_IN, + CXACRU_EP_CMD + USB_DIR_OUT, + 0}; int ret; /* instance init */ @@ -1179,13 +1182,11 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance, } if (usb_endpoint_xfer_int(&cmd_ep->desc)) - ret = usb_find_common_endpoints(intf->cur_altsetting, - NULL, NULL, &in, &out); + ret = usb_check_int_endpoints(intf, ep_addrs); else - ret = usb_find_common_endpoints(intf->cur_altsetting, - &in, &out, NULL, NULL); + ret = usb_check_bulk_endpoints(intf, ep_addrs); - if (ret) { + if (!ret) { usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n"); ret = -ENODEV; goto fail; diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index 2b8f98f0707e..c12d7f040171 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -1960,6 +1960,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) unsigned int bit; unsigned long reg; + local_bh_disable(); spin_lock_irqsave(&priv_dev->lock, flags); reg = readl(&priv_dev->regs->usb_ists); @@ -2001,6 +2002,7 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) irqend: writel(~0, &priv_dev->regs->ep_ien); spin_unlock_irqrestore(&priv_dev->lock, flags); + local_bh_enable(); return ret; } diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c index 0044897ee800..913050230d1a 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.c +++ b/drivers/usb/cdns3/cdnsp-gadget.c @@ -138,6 +138,26 @@ static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev, (portsc & PORT_CHANGE_BITS), port_regs); } +static void cdnsp_set_apb_timeout_value(struct cdnsp_device *pdev) +{ + struct cdns *cdns = dev_get_drvdata(pdev->dev); + __le32 __iomem *reg; + void __iomem *base; + u32 offset = 0; + u32 val; + + if (!cdns->override_apb_timeout) + return; + + base = &pdev->cap_regs->hc_capbase; + offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP); + reg = base + offset + REG_CHICKEN_BITS_3_OFFSET; + + val = le32_to_cpu(readl(reg)); + val = CHICKEN_APB_TIMEOUT_SET(val, cdns->override_apb_timeout); + writel(cpu_to_le32(val), reg); +} + static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit) { __le32 __iomem *reg; @@ -1776,6 +1796,8 @@ static void cdnsp_get_rev_cap(struct cdnsp_device *pdev) reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP); 
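The local_bh_disable()/local_bh_enable() pair wrapped around the cdns3 threaded interrupt handler makes the request-completion callbacks it invokes run with bottom halves disabled, the same context they would see from a hard-IRQ completion path. A hedged sketch of that pattern; struct example_dev, its lock and the handler body are placeholders:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    struct example_dev {
        spinlock_t lock;
        /* ... registers, endpoints ... */
    };

    static irqreturn_t example_thread_irq(int irq, void *data)
    {
        struct example_dev *dev = data;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        local_bh_disable();
        spin_lock_irqsave(&dev->lock, flags);

        /* read status, complete requests (callbacks expect BHs off) */
        ret = IRQ_HANDLED;

        spin_unlock_irqrestore(&dev->lock, flags);
        local_bh_enable();

        return ret;
    }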
pdev->rev_cap = reg; + pdev->rtl_revision = readl(&pdev->rev_cap->rtl_revision); + dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n", readl(&pdev->rev_cap->ctrl_revision), readl(&pdev->rev_cap->rtl_revision), @@ -1801,6 +1823,15 @@ static int cdnsp_gen_setup(struct cdnsp_device *pdev) pdev->hci_version = HC_VERSION(pdev->hcc_params); pdev->hcc_params = readl(&pdev->cap_regs->hcc_params); + /* + * Override the APB timeout value to give the controller more time for + * enabling UTMI clock and synchronizing APB and UTMI clock domains. + * This fix is platform specific and is required to fixes issue with + * reading incorrect value from PORTSC register after resuming + * from L1 state. + */ + cdnsp_set_apb_timeout_value(pdev); + cdnsp_get_rev_cap(pdev); /* Make sure the Device Controller is halted. */ diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h index a61aef0dc273..5cffc1444d3a 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.h +++ b/drivers/usb/cdns3/cdnsp-gadget.h @@ -520,6 +520,9 @@ struct cdnsp_rev_cap { #define REG_CHICKEN_BITS_2_OFFSET 0x48 #define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28) +#define REG_CHICKEN_BITS_3_OFFSET 0x4C +#define CHICKEN_APB_TIMEOUT_SET(p, val) (((p) & ~GENMASK(21, 0)) | (val)) + /* XBUF Extended Capability ID. */ #define XBUF_CAP_ID 0xCB #define XBUF_RX_TAG_MASK_0_OFFSET 0x1C @@ -1359,6 +1362,7 @@ struct cdnsp_port { * @rev_cap: Controller Capabilities Registers. * @hcs_params1: Cached register copies of read-only HCSPARAMS1 * @hcc_params: Cached register copies of read-only HCCPARAMS1 + * @rtl_revision: Cached controller rtl revision. * @setup: Temporary buffer for setup packet. * @ep0_preq: Internal allocated request used during enumeration. * @ep0_stage: ep0 stage during enumeration process. @@ -1413,6 +1417,8 @@ struct cdnsp_device { __u32 hcs_params1; __u32 hcs_params3; __u32 hcc_params; + #define RTL_REVISION_NEW_LPM 0x2700 + __u32 rtl_revision; /* Lock used in interrupt thread context. */ spinlock_t lock; struct usb_ctrlrequest setup; diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c index a85db23fa19f..b7a1f28faa1f 100644 --- a/drivers/usb/cdns3/cdnsp-pci.c +++ b/drivers/usb/cdns3/cdnsp-pci.c @@ -33,6 +33,8 @@ #define CDNS_DRD_ID 0x0100 #define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80) +#define CHICKEN_APB_TIMEOUT_VALUE 0x1C20 + static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev) { /* @@ -144,6 +146,14 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, cdnsp->otg_irq = pdev->irq; } + /* + * Cadence PCI based platform require some longer timeout for APB + * to fixes domain clock synchronization issue after resuming + * controller from L1 state. + */ + cdnsp->override_apb_timeout = CHICKEN_APB_TIMEOUT_VALUE; + pci_set_drvdata(pdev, cdnsp); + if (pci_is_enabled(func)) { cdnsp->dev = dev; cdnsp->gadget_init = cdnsp_gadget_init; @@ -153,8 +163,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev, goto free_cdnsp; } - pci_set_drvdata(pdev, cdnsp); - device_wakeup_enable(&pdev->dev); if (pci_dev_run_wake(pdev)) pm_runtime_put_noidle(&pdev->dev); diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index 47096b8e3179..6247584cb939 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -308,7 +308,8 @@ static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev, writel(db_value, reg_addr); - cdnsp_force_l0_go(pdev); + if (pdev->rtl_revision < RTL_REVISION_NEW_LPM) + cdnsp_force_l0_go(pdev); /* Doorbell was set. 
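CHICKEN_APB_TIMEOUT_SET() is a read-modify-write helper: it clears bits 21:0 of the current register value and ORs in the new timeout, leaving the upper bits untouched. A small userspace model of that kind of field update; FIELD_MASK and set_timeout_field() are illustrative, the register is simulated with a plain variable, and 0x1C20 is the value the PCI glue programs:

    #include <stdint.h>
    #include <stdio.h>

    #define FIELD_MASK ((1u << 22) - 1)   /* bits 21:0, like GENMASK(21, 0) */

    static uint32_t set_timeout_field(uint32_t reg, uint32_t timeout)
    {
        return (reg & ~FIELD_MASK) | (timeout & FIELD_MASK);
    }

    int main(void)
    {
        uint32_t reg = 0xABC00123;              /* upper bits must survive */

        reg = set_timeout_field(reg, 0x1C20);
        printf("0x%08x\n", (unsigned int)reg);  /* prints 0xABC01C20 */
        return 0;
    }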
*/ return true; diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h index 81a9c9d6be08..ac30ee21309d 100644 --- a/drivers/usb/cdns3/core.h +++ b/drivers/usb/cdns3/core.h @@ -44,6 +44,7 @@ struct cdns3_platform_data { bool suspend, bool wakeup); unsigned long quirks; #define CDNS3_DEFAULT_PM_RUNTIME_ALLOW BIT(0) +#define CDNS3_DRD_SUSPEND_RESIDENCY_ENABLE BIT(1) }; /** @@ -78,6 +79,8 @@ struct cdns3_platform_data { * @pdata: platform data from glue layer * @lock: spinlock structure * @xhci_plat_data: xhci private data structure pointer + * @override_apb_timeout: hold value of APB timeout. For value 0 the default + * value in CHICKEN_BITS_3 will be preserved. * @gadget_init: pointer to gadget initialization function */ struct cdns { @@ -116,6 +119,7 @@ struct cdns { struct cdns3_platform_data *pdata; spinlock_t lock; struct xhci_plat_priv *xhci_plat_data; + u32 override_apb_timeout; int (*gadget_init)(struct cdns *cdns); }; diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c index 33ba30f79b33..8e19ee72c120 100644 --- a/drivers/usb/cdns3/drd.c +++ b/drivers/usb/cdns3/drd.c @@ -385,7 +385,7 @@ static irqreturn_t cdns_drd_irq(int irq, void *data) int cdns_drd_init(struct cdns *cdns) { void __iomem *regs; - u32 state; + u32 state, reg; int ret; regs = devm_ioremap_resource(cdns->dev, &cdns->otg_res); @@ -429,6 +429,14 @@ int cdns_drd_init(struct cdns *cdns) cdns->otg_irq_regs = (struct cdns_otg_irq_regs __iomem *) &cdns->otg_v1_regs->ien; writel(1, &cdns->otg_v1_regs->simulate); + + if (cdns->pdata && + (cdns->pdata->quirks & CDNS3_DRD_SUSPEND_RESIDENCY_ENABLE)) { + reg = readl(&cdns->otg_v1_regs->susp_ctrl); + reg |= SUSP_CTRL_SUSPEND_RESIDENCY_ENABLE; + writel(reg, &cdns->otg_v1_regs->susp_ctrl); + } + cdns->version = CDNS3_CONTROLLER_V1; } else { dev_err(cdns->dev, "not supporte DID=0x%08x\n", state); diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h index d72370c321d3..1e2aee14d629 100644 --- a/drivers/usb/cdns3/drd.h +++ b/drivers/usb/cdns3/drd.h @@ -193,6 +193,9 @@ struct cdns_otg_irq_regs { /* OTGREFCLK - bitmasks */ #define OTGREFCLK_STB_CLK_SWITCH_EN BIT(31) +/* SUPS_CTRL - bitmasks */ +#define SUSP_CTRL_SUSPEND_RESIDENCY_ENABLE BIT(17) + /* OVERRIDE - bitmasks */ #define OVERRIDE_IDPULLUP BIT(0) /* Only for CDNS3_CONTROLLER_V0 version */ diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 984087bbf3e2..9a4c7ea1241c 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -324,6 +324,13 @@ static int ci_hdrc_imx_notify_event(struct ci_hdrc *ci, unsigned int event) return ret; } +static void ci_hdrc_imx_disable_regulator(void *arg) +{ + struct ci_hdrc_imx_data *data = arg; + + regulator_disable(data->hsic_pad_regulator); +} + static int ci_hdrc_imx_probe(struct platform_device *pdev) { struct ci_hdrc_imx_data *data; @@ -357,25 +364,36 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) data->pinctrl = devm_pinctrl_get(dev); if (PTR_ERR(data->pinctrl) == -ENODEV) data->pinctrl = NULL; - else if (IS_ERR(data->pinctrl)) - return dev_err_probe(dev, PTR_ERR(data->pinctrl), + else if (IS_ERR(data->pinctrl)) { + ret = dev_err_probe(dev, PTR_ERR(data->pinctrl), "pinctrl get failed\n"); + goto err_put; + } data->hsic_pad_regulator = devm_regulator_get_optional(dev, "hsic"); if (PTR_ERR(data->hsic_pad_regulator) == -ENODEV) { /* no pad regualator is needed */ data->hsic_pad_regulator = NULL; - } else if (IS_ERR(data->hsic_pad_regulator)) - return dev_err_probe(dev, 
PTR_ERR(data->hsic_pad_regulator), + } else if (IS_ERR(data->hsic_pad_regulator)) { + ret = dev_err_probe(dev, PTR_ERR(data->hsic_pad_regulator), "Get HSIC pad regulator error\n"); + goto err_put; + } if (data->hsic_pad_regulator) { ret = regulator_enable(data->hsic_pad_regulator); if (ret) { dev_err(dev, "Failed to enable HSIC pad regulator\n"); - return ret; + goto err_put; + } + ret = devm_add_action_or_reset(dev, + ci_hdrc_imx_disable_regulator, data); + if (ret) { + dev_err(dev, + "Failed to add regulator devm action\n"); + goto err_put; } } } @@ -389,13 +407,14 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) dev_err(dev, "pinctrl_hsic_idle lookup failed, err=%ld\n", PTR_ERR(pinctrl_hsic_idle)); - return PTR_ERR(pinctrl_hsic_idle); + ret = PTR_ERR(pinctrl_hsic_idle); + goto err_put; } ret = pinctrl_select_state(data->pinctrl, pinctrl_hsic_idle); if (ret) { dev_err(dev, "hsic_idle select failed, err=%d\n", ret); - return ret; + goto err_put; } data->pinctrl_hsic_active = pinctrl_lookup_state(data->pinctrl, @@ -404,7 +423,8 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) dev_err(dev, "pinctrl_hsic_active lookup failed, err=%ld\n", PTR_ERR(data->pinctrl_hsic_active)); - return PTR_ERR(data->pinctrl_hsic_active); + ret = PTR_ERR(data->pinctrl_hsic_active); + goto err_put; } } @@ -413,11 +433,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) ret = imx_get_clks(dev); if (ret) - goto disable_hsic_regulator; + goto qos_remove_request; ret = imx_prepare_enable_clks(dev); if (ret) - goto disable_hsic_regulator; + goto qos_remove_request; data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0); if (IS_ERR(data->phy)) { @@ -443,7 +463,11 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI) { pdata.flags |= CI_HDRC_OVERRIDE_PHY_CONTROL; data->override_phy_control = true; - usb_phy_init(pdata.usb_phy); + ret = usb_phy_init(pdata.usb_phy); + if (ret) { + dev_err(dev, "Failed to init phy\n"); + goto err_clk; + } } if (pdata.flags & CI_HDRC_SUPPORTS_RUNTIME_PM) @@ -452,7 +476,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) ret = imx_usbmisc_init(data->usbmisc_data); if (ret) { dev_err(dev, "usbmisc init failed, ret=%d\n", ret); - goto err_clk; + goto phy_shutdown; } data->ci_pdev = ci_hdrc_add_device(dev, @@ -461,7 +485,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) if (IS_ERR(data->ci_pdev)) { ret = PTR_ERR(data->ci_pdev); dev_err_probe(dev, ret, "ci_hdrc_add_device failed\n"); - goto err_clk; + goto phy_shutdown; } if (data->usbmisc_data) { @@ -495,19 +519,22 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev) disable_device: ci_hdrc_remove_device(data->ci_pdev); +phy_shutdown: + if (data->override_phy_control) + usb_phy_shutdown(data->phy); err_clk: imx_disable_unprepare_clks(dev); -disable_hsic_regulator: - if (data->hsic_pad_regulator) - /* don't overwrite original ret (cf. 
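Registering ci_hdrc_imx_disable_regulator() through devm_add_action_or_reset() ties regulator_disable() to the device's lifetime, so the later error paths and the remove callback no longer have to disable the regulator by hand. A hedged sketch of the pattern for some optional supply; example_disable_vdd() and example_enable_vdd() are placeholder names:

    #include <linux/device.h>
    #include <linux/regulator/consumer.h>

    static void example_disable_vdd(void *data)
    {
        regulator_disable(data);
    }

    static int example_enable_vdd(struct device *dev, struct regulator *vdd)
    {
        int ret;

        ret = regulator_enable(vdd);
        if (ret)
            return ret;

        /*
         * On failure the action runs immediately and the regulator is
         * disabled again; on success it runs automatically when the
         * device is unbound.
         */
        return devm_add_action_or_reset(dev, example_disable_vdd, vdd);
    }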
EPROBE_DEFER) */ - regulator_disable(data->hsic_pad_regulator); +qos_remove_request: if (pdata.flags & CI_HDRC_PMQOS) cpu_latency_qos_remove_request(&data->pm_qos_req); data->ci_pdev = NULL; +err_put: + if (data->usbmisc_data) + put_device(data->usbmisc_data->dev); return ret; } -static int ci_hdrc_imx_remove(struct platform_device *pdev) +static void ci_hdrc_imx_remove(struct platform_device *pdev) { struct ci_hdrc_imx_data *data = platform_get_drvdata(pdev); @@ -524,11 +551,9 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev) imx_disable_unprepare_clks(&pdev->dev); if (data->plat_data->flags & CI_HDRC_PMQOS) cpu_latency_qos_remove_request(&data->pm_qos_req); - if (data->hsic_pad_regulator) - regulator_disable(data->hsic_pad_regulator); } - - return 0; + if (data->usbmisc_data) + put_device(data->usbmisc_data->dev); } static void ci_hdrc_imx_shutdown(struct platform_device *pdev) @@ -674,7 +699,7 @@ static const struct dev_pm_ops ci_hdrc_imx_pm_ops = { }; static struct platform_driver ci_hdrc_imx_driver = { .probe = ci_hdrc_imx_probe, - .remove = ci_hdrc_imx_remove, + .remove_new = ci_hdrc_imx_remove, .shutdown = ci_hdrc_imx_shutdown, .driver = { .name = "imx_usb", diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index 35dfc05854fb..3795c70a3155 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -2052,7 +2052,7 @@ static irqreturn_t udc_irq(struct ci_hdrc *ci) } } - if (USBi_UI & intr) + if ((USBi_UI | USBi_UEI) & intr) isr_tr_complete_handler(ci); if ((USBi_SLI & intr) && !(ci->suspended)) { diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 9ab40d106ab0..ead891de5e1d 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -360,7 +360,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf) static void acm_ctrl_irq(struct urb *urb) { struct acm *acm = urb->context; - struct usb_cdc_notification *dr = urb->transfer_buffer; + struct usb_cdc_notification *dr; unsigned int current_size = urb->actual_length; unsigned int expected_size, copy_size, alloc_size; int retval; @@ -387,14 +387,25 @@ static void acm_ctrl_irq(struct urb *urb) usb_mark_last_busy(acm->dev); - if (acm->nb_index) + if (acm->nb_index == 0) { + /* + * The first chunk of a message must contain at least the + * notification header with the length field, otherwise we + * can't get an expected_size. 
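The switch from .remove to .remove_new reflects the platform bus moving to a remove callback that returns void: an error returned from remove was never acted on, so the new prototype simply drops it. A hedged sketch of a converted driver; example_remove(), example_probe(), example_teardown() and struct example_priv are placeholders:

    static void example_remove(struct platform_device *pdev)
    {
        struct example_priv *priv = platform_get_drvdata(pdev);

        /* Undo probe in reverse order; nothing useful can be returned. */
        example_teardown(priv);
    }

    static struct platform_driver example_driver = {
        .probe      = example_probe,
        .remove_new = example_remove,
        .driver     = {
            .name = "example",
        },
    };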
+ */ + if (current_size < sizeof(struct usb_cdc_notification)) { + dev_dbg(&acm->control->dev, "urb too short\n"); + goto exit; + } + dr = urb->transfer_buffer; + } else { dr = (struct usb_cdc_notification *)acm->notification_buffer; - + } /* size = notification-header + (optional) data */ expected_size = sizeof(struct usb_cdc_notification) + le16_to_cpu(dr->wLength); - if (current_size < expected_size) { + if (acm->nb_index != 0 || current_size < expected_size) { /* notification is transmitted fragmented, reassemble */ if (acm->nb_size < expected_size) { u8 *new_buffer; @@ -1703,13 +1714,16 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ }, - { USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */ + { USB_DEVICE(0x045b, 0x023c), /* Renesas R-Car H3 USB Download mode */ .driver_info = DISABLE_ECHO, /* Don't echo banner */ }, - { USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */ + { USB_DEVICE(0x045b, 0x0247), /* Renesas R-Car D3 USB Download mode */ .driver_info = DISABLE_ECHO, /* Don't echo banner */ }, - { USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */ + { USB_DEVICE(0x045b, 0x0248), /* Renesas R-Car M3-N USB Download mode */ + .driver_info = DISABLE_ECHO, /* Don't echo banner */ + }, + { USB_DEVICE(0x045b, 0x024D), /* Renesas R-Car E3 USB Download mode */ .driver_info = DISABLE_ECHO, /* Don't echo banner */ }, { USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */ diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index eb0f5d7cc756..e3408a67af45 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -726,7 +726,7 @@ static int wdm_open(struct inode *inode, struct file *file) rv = -EBUSY; goto out; } - + smp_rmb(); /* ordered against wdm_wwan_port_stop() */ rv = usb_autopm_get_interface(desc->intf); if (rv < 0) { dev_err(&desc->intf->dev, "Error autopm - %d\n", rv); @@ -829,6 +829,7 @@ static struct usb_class_driver wdm_class = { static int wdm_wwan_port_start(struct wwan_port *port) { struct wdm_device *desc = wwan_port_get_drvdata(port); + int rv; /* The interface is both exposed via the WWAN framework and as a * legacy usbmisc chardev. 
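The acm_ctrl_irq() fix only trusts dr->wLength after verifying that the first fragment carries a complete notification header; reading the length field from a shorter URB would be an out-of-bounds access. A simplified userspace model of that check; struct notif_header only loosely mirrors struct usb_cdc_notification (no endianness handling):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    struct notif_header {
        uint8_t  bmRequestType;
        uint8_t  bNotificationType;
        uint16_t wValue;
        uint16_t wIndex;
        uint16_t wLength;      /* length of the data that follows */
    };

    /* Return the expected total size, or 0 if the fragment is too short. */
    size_t expected_total(const uint8_t *frag, size_t frag_len)
    {
        struct notif_header hdr;

        if (frag_len < sizeof(hdr))
            return 0;              /* cannot trust wLength yet */

        memcpy(&hdr, frag, sizeof(hdr));
        return sizeof(hdr) + hdr.wLength;
    }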
If chardev is already open, just fail @@ -848,7 +849,15 @@ static int wdm_wwan_port_start(struct wwan_port *port) wwan_port_txon(port); /* Start getting events */ - return usb_submit_urb(desc->validity, GFP_KERNEL); + rv = usb_submit_urb(desc->validity, GFP_KERNEL); + if (rv < 0) { + wwan_port_txoff(port); + desc->manage_power(desc->intf, 0); + /* this must be last lest we race with chardev open */ + clear_bit(WDM_WWAN_IN_USE, &desc->flags); + } + + return rv; } static void wdm_wwan_port_stop(struct wwan_port *port) @@ -859,8 +868,10 @@ static void wdm_wwan_port_stop(struct wwan_port *port) poison_urbs(desc); desc->manage_power(desc->intf, 0); clear_bit(WDM_READ, &desc->flags); - clear_bit(WDM_WWAN_IN_USE, &desc->flags); unpoison_urbs(desc); + smp_wmb(); /* ordered against wdm_open() */ + /* this must be last lest we open a poisoned device */ + clear_bit(WDM_WWAN_IN_USE, &desc->flags); } static void wdm_wwan_port_tx_complete(struct urb *urb) @@ -868,7 +879,7 @@ static void wdm_wwan_port_tx_complete(struct urb *urb) struct sk_buff *skb = urb->context; struct wdm_device *desc = skb_shinfo(skb)->destructor_arg; - usb_autopm_put_interface(desc->intf); + usb_autopm_put_interface_async(desc->intf); wwan_port_txon(desc->wwanp); kfree_skb(skb); } @@ -898,7 +909,7 @@ static int wdm_wwan_port_tx(struct wwan_port *port, struct sk_buff *skb) req->bRequestType = (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE); req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND; req->wValue = 0; - req->wIndex = desc->inum; + req->wIndex = desc->inum; /* already converted */ req->wLength = cpu_to_le16(skb->len); skb_shinfo(skb)->destructor_arg = desc; diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index f27b4aecff3d..759f567538e2 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -1337,11 +1337,12 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) return -EINVAL; + alts = usblp->protocol[protocol].alt_setting; + if (alts < 0) + return -EINVAL; + /* Don't unnecessarily set the interface if there's a single alt. 
*/ if (usblp->intf->num_altsetting > 1) { - alts = usblp->protocol[protocol].alt_setting; - if (alts < 0) - return -EINVAL; r = usb_set_interface(usblp->dev, usblp->ifnum, alts); if (r < 0) { printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index c2e666e82857..2f92905e05ca 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -482,6 +482,7 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) u8 *buffer; u8 tag; int rv; + long wait_rv; dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n", data->iin_ep_present); @@ -511,16 +512,17 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) } if (data->iin_ep_present) { - rv = wait_event_interruptible_timeout( + wait_rv = wait_event_interruptible_timeout( data->waitq, atomic_read(&data->iin_data_valid) != 0, file_data->timeout); - if (rv < 0) { - dev_dbg(dev, "wait interrupted %d\n", rv); + if (wait_rv < 0) { + dev_dbg(dev, "wait interrupted %ld\n", wait_rv); + rv = wait_rv; goto exit; } - if (rv == 0) { + if (wait_rv == 0) { dev_dbg(dev, "wait timed out\n"); rv = -ETIMEDOUT; goto exit; @@ -539,6 +541,8 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb) dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv); + rv = 0; + exit: /* bump interrupt bTag */ data->iin_bTag += 1; @@ -602,9 +606,9 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data, { struct usbtmc_device_data *data = file_data->data; struct device *dev = &data->intf->dev; - int rv; u32 timeout; unsigned long expire; + long wait_rv; if (!data->iin_ep_present) { dev_dbg(dev, "no interrupt endpoint present\n"); @@ -618,25 +622,24 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data, mutex_unlock(&data->io_mutex); - rv = wait_event_interruptible_timeout( - data->waitq, - atomic_read(&file_data->srq_asserted) != 0 || - atomic_read(&file_data->closing), - expire); + wait_rv = wait_event_interruptible_timeout( + data->waitq, + atomic_read(&file_data->srq_asserted) != 0 || + atomic_read(&file_data->closing), + expire); mutex_lock(&data->io_mutex); /* Note! disconnect or close could be called in the meantime */ if (atomic_read(&file_data->closing) || data->zombie) - rv = -ENODEV; + return -ENODEV; - if (rv < 0) { - /* dev can be invalid now! 
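The usbtmc changes store the result of wait_event_interruptible_timeout() in a long rather than an int because the macro returns a long, and they handle the three possible outcomes explicitly. The canonical shape of that handling, as a hedged kernel-style fragment; data->waitq, data->ready and timeout are placeholders:

    long wait_rv;

    wait_rv = wait_event_interruptible_timeout(data->waitq,
                                               atomic_read(&data->ready) != 0,
                                               timeout);
    if (wait_rv < 0)
        return wait_rv;        /* interrupted by a signal (-ERESTARTSYS) */
    if (wait_rv == 0)
        return -ETIMEDOUT;     /* condition never became true */

    /* wait_rv > 0: condition is true, remaining jiffies in wait_rv */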
*/ - pr_debug("%s - wait interrupted %d\n", __func__, rv); - return rv; + if (wait_rv < 0) { + dev_dbg(dev, "%s - wait interrupted %ld\n", __func__, wait_rv); + return wait_rv; } - if (rv == 0) { + if (wait_rv == 0) { dev_dbg(dev, "%s - wait timed out\n", __func__); return -ETIMEDOUT; } @@ -830,6 +833,7 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data, unsigned long expire; int bufcount = 1; int again = 0; + long wait_rv; /* mutex already locked */ @@ -942,19 +946,24 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data, if (!(flags & USBTMC_FLAG_ASYNC)) { dev_dbg(dev, "%s: before wait time %lu\n", __func__, expire); - retval = wait_event_interruptible_timeout( + wait_rv = wait_event_interruptible_timeout( file_data->wait_bulk_in, usbtmc_do_transfer(file_data), expire); - dev_dbg(dev, "%s: wait returned %d\n", - __func__, retval); + dev_dbg(dev, "%s: wait returned %ld\n", + __func__, wait_rv); - if (retval <= 0) { - if (retval == 0) - retval = -ETIMEDOUT; + if (wait_rv < 0) { + retval = wait_rv; goto error; } + + if (wait_rv == 0) { + retval = -ETIMEDOUT; + goto error; + } + } urb = usb_get_from_anchor(&file_data->in_anchor); @@ -1380,7 +1389,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf, if (!buffer) return -ENOMEM; - mutex_lock(&data->io_mutex); + retval = mutex_lock_interruptible(&data->io_mutex); + if (retval < 0) + goto exit_nolock; + if (data->zombie) { retval = -ENODEV; goto exit; @@ -1503,6 +1515,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf, exit: mutex_unlock(&data->io_mutex); +exit_nolock: kfree(buffer); return retval; } diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index e9ff3b672d6c..f702b41437e1 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1818,6 +1818,17 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) desc = intf->cur_altsetting; hdev = interface_to_usbdev(intf); + /* + * The USB 2.0 spec prohibits hubs from having more than one + * configuration or interface, and we rely on this prohibition. + * Refuse to accept a device that violates it. 
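usbtmc_read() now takes its I/O mutex with mutex_lock_interruptible() so a reader blocked behind a long transfer can be interrupted by a signal, and the extra exit_nolock label skips the unlock that the existing exit path performs. A hedged sketch of that shape; example_read(), struct example_dev and io_mutex are illustrative:

    static ssize_t example_read(struct example_dev *dev, size_t len)
    {
        ssize_t retval;
        char *buffer;

        buffer = kmalloc(len, GFP_KERNEL);
        if (!buffer)
            return -ENOMEM;

        retval = mutex_lock_interruptible(&dev->io_mutex);
        if (retval < 0)
            goto exit_nolock;          /* interrupted, lock never taken */

        /* ... perform the transfer into buffer, set retval ... */
        retval = 0;

        mutex_unlock(&dev->io_mutex);
    exit_nolock:
        kfree(buffer);
        return retval;
    }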
+ */ + if (hdev->descriptor.bNumConfigurations > 1 || + hdev->actconfig->desc.bNumInterfaces > 1) { + dev_err(&intf->dev, "Invalid hub with more than one config or interface\n"); + return -EINVAL; + } + /* * Set default autosuspend delay as 0 to speedup bus suspend, * based on the below considerations: @@ -2633,13 +2644,13 @@ int usb_new_device(struct usb_device *udev) err = sysfs_create_link(&udev->dev.kobj, &port_dev->dev.kobj, "port"); if (err) - goto fail; + goto out_del_dev; err = sysfs_create_link(&port_dev->dev.kobj, &udev->dev.kobj, "device"); if (err) { sysfs_remove_link(&udev->dev.kobj, "port"); - goto fail; + goto out_del_dev; } if (!test_and_set_bit(port1, hub->child_usage_bits)) @@ -2651,6 +2662,8 @@ int usb_new_device(struct usb_device *udev) pm_runtime_put_sync_autosuspend(&udev->dev); return err; +out_del_dev: + device_del(&udev->dev); fail: usb_set_device_state(udev, USB_STATE_NOTATTACHED); pm_runtime_disable(&udev->dev); @@ -4650,7 +4663,6 @@ void usb_ep0_reinit(struct usb_device *udev) EXPORT_SYMBOL_GPL(usb_ep0_reinit); #define usb_sndaddr0pipe() (PIPE_CONTROL << 30) -#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN) static int hub_set_address(struct usb_device *udev, int devnum) { @@ -4756,7 +4768,7 @@ static int get_bMaxPacketSize0(struct usb_device *udev, for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) { /* Start with invalid values in case the transfer fails */ buf->bDescriptorType = buf->bMaxPacketSize0 = 0; - rc = usb_control_msg(udev, usb_rcvaddr0pipe(), + rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, USB_DT_DEVICE << 8, 0, buf, size, @@ -6007,6 +6019,36 @@ void usb_hub_cleanup(void) usb_deregister(&hub_driver); } /* usb_hub_cleanup() */ +/** + * hub_hc_release_resources - clear resources used by host controller + * @udev: pointer to device being released + * + * Context: task context, might sleep + * + * Function releases the host controller resources in correct order before + * making any operation on resuming usb device. The host controller resources + * allocated for devices in tree should be released starting from the last + * usb device in tree toward the root hub. This function is used only during + * resuming device when usb device require reinitialization – that is, when + * flag udev->reset_resume is set. + * + * This call is synchronous, and may not be used in an interrupt context. 
+ */ +static void hub_hc_release_resources(struct usb_device *udev) +{ + struct usb_hub *hub = usb_hub_to_struct_hub(udev); + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + int i; + + /* Release up resources for all children before this device */ + for (i = 0; i < udev->maxchild; i++) + if (hub->ports[i]->child) + hub_hc_release_resources(hub->ports[i]->child); + + if (hcd->driver->reset_device) + hcd->driver->reset_device(hcd, udev); +} + /** * usb_reset_and_verify_device - perform a USB port reset to reinitialize a device * @udev: device to reset (not in SUSPENDED or NOTATTACHED state) @@ -6071,6 +6113,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev) bos = udev->bos; udev->bos = NULL; + if (udev->reset_resume) + hub_hc_release_resources(udev); + mutex_lock(hcd->address0_mutex); for (i = 0; i < PORT_INIT_TRIES; ++i) { diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c index 0007031fad0d..0c97f15adfc8 100644 --- a/drivers/usb/core/port.c +++ b/drivers/usb/core/port.c @@ -412,10 +412,11 @@ static int usb_port_runtime_suspend(struct device *dev) static void usb_port_shutdown(struct device *dev) { struct usb_port *port_dev = to_usb_port(dev); + struct usb_device *udev = port_dev->child; - if (port_dev->child) { - usb_disable_usb2_hardware_lpm(port_dev->child); - usb_unlocked_disable_lpm(port_dev->child); + if (udev && !udev->port_is_suspended) { + usb_disable_usb2_hardware_lpm(udev); + usb_unlocked_disable_lpm(udev); } } diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 022e8caba854..2a833b8102b0 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -348,6 +348,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0638, 0x0a13), .driver_info = USB_QUIRK_STRING_FETCH_255 }, + /* Prolific Single-LUN Mass Storage Card Reader */ + { USB_DEVICE(0x067b, 0x2731), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_NO_LPM }, + /* Saitek Cyborg Gold Joystick */ { USB_DEVICE(0x06a3, 0x0006), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, @@ -372,6 +376,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM }, { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM }, + /* SanDisk Corp. 
SanDisk 3.2Gen1 */ + { USB_DEVICE(0x0781, 0x55a3), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Realforce 87U Keyboard */ { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM }, @@ -386,6 +393,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0904, 0x6103), .driver_info = USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, + /* Silicon Motion Flash Drive */ + { USB_DEVICE(0x090c, 0x1000), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Sound Devices USBPre2 */ { USB_DEVICE(0x0926, 0x0202), .driver_info = USB_QUIRK_ENDPOINT_IGNORE }, @@ -444,6 +454,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0c45, 0x7056), .driver_info = USB_QUIRK_IGNORE_REMOTE_WAKEUP }, + /* Sony Xperia XZ1 Compact (lilac) smartphone in fastboot mode */ + { USB_DEVICE(0x0fce, 0x0dde), .driver_info = USB_QUIRK_NO_LPM }, + /* Action Semiconductor flash disk */ { USB_DEVICE(0x10d6, 0x2200), .driver_info = USB_QUIRK_STRING_FETCH_255 }, @@ -534,10 +547,16 @@ static const struct usb_device_id usb_quirk_list[] = { /* Blackmagic Design UltraStudio SDI */ { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, + /* Teclast disk */ + { USB_DEVICE(0x1f75, 0x0917), .driver_info = USB_QUIRK_NO_LPM }, + /* Hauppauge HVR-950q */ { USB_DEVICE(0x2040, 0x7200), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* VLI disk */ + { USB_DEVICE(0x2109, 0x0711), .driver_info = USB_QUIRK_NO_LPM }, + /* Lexar Flash */ { USB_DEVICE(0x21c4, 0x0cd1), .driver_info = USB_QUIRK_NO_LPM }, diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index da37aee22b72..8e18195b7390 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -888,10 +888,10 @@ static void dwc2_gadget_config_nonisoc_xfer_ddma(struct dwc2_hsotg_ep *hs_ep, } /* DMA sg buffer */ - for_each_sg(ureq->sg, sg, ureq->num_sgs, i) { + for_each_sg(ureq->sg, sg, ureq->num_mapped_sgs, i) { dwc2_gadget_fill_nonisoc_xfer_ddma_one(hs_ep, &desc, sg_dma_address(sg) + sg->offset, sg_dma_len(sg), - sg_is_last(sg)); + (i == (ureq->num_mapped_sgs - 1))); desc_count += hs_ep->desc_count; } @@ -4656,6 +4656,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget) spin_lock_irqsave(&hsotg->lock, flags); hsotg->driver = NULL; + hsotg->gadget.dev.of_node = NULL; hsotg->gadget.speed = USB_SPEED_UNKNOWN; hsotg->enabled = 0; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 97605ee7a759..a36002b67a7c 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -3569,11 +3569,9 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, port_status |= USB_PORT_STAT_C_OVERCURRENT << 16; } - if (!hsotg->flags.b.port_connect_status) { + if (dwc2_is_device_mode(hsotg)) { /* - * The port is disconnected, which means the core is - * either in device mode or it soon will be. Just - * return 0's for the remainder of the port status + * Just return 0's for the remainder of the port status * since the port register can't be read if the core * is in device mode. */ @@ -3643,13 +3641,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq, if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1)) goto error; - if (!hsotg->flags.b.port_connect_status) { + if (dwc2_is_device_mode(hsotg)) { /* - * The port is disconnected, which means the core is - * either in device mode or it soon will be. Just - * return without doing anything since the port - * register can't be written if the core is in device - * mode. 
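The dwc2 gadget change walks req->num_mapped_sgs rather than num_sgs because dma_map_sg() may coalesce entries, so the mapped count (and the loop index, not sg_is_last()) identifies the final DMA descriptor. A hedged fragment of that iteration pattern; req, hw and queue_dma_desc() are placeholders:

    struct scatterlist *s;
    unsigned int i;

    for_each_sg(req->sg, s, req->num_mapped_sgs, i) {
        bool last = (i == req->num_mapped_sgs - 1);

        /* program one DMA descriptor per mapped entry */
        queue_dma_desc(hw, sg_dma_address(s), sg_dma_len(s), last);
    }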
+ * Just return 0's for the remainder of the port status + * since the port register can't be read if the core + * is in device mode. */ break; } @@ -4372,7 +4368,7 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd) if (hsotg->bus_suspended) goto skip_power_saving; - if (hsotg->flags.b.port_connect_status == 0) + if (!(dwc2_read_hprt0(hsotg) & HPRT0_CONNSTS)) goto skip_power_saving; switch (hsotg->params.power_down) { @@ -4456,6 +4452,7 @@ static int _dwc2_hcd_resume(struct usb_hcd *hcd) * Power Down mode. */ if (hprt0 & HPRT0_CONNSTS) { + set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); hsotg->lx_state = DWC2_L0; goto unlock; } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index c53e6072d405..bfaa4b96c3e2 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -125,11 +125,24 @@ void dwc3_enable_susphy(struct dwc3 *dwc, bool enable) dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); } -void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode) +void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy) { + unsigned int hw_mode; u32 reg; reg = dwc3_readl(dwc->regs, DWC3_GCTL); + + /* + * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and + * GUSB2PHYCFG.SUSPHY should be cleared during mode switching, + * and they can be set after core initialization. + */ + hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0); + if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) { + if (DWC3_GCTL_PRTCAP(reg) != mode) + dwc3_enable_susphy(dwc, false); + } + reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG)); reg |= DWC3_GCTL_PRTCAPDIR(mode); dwc3_writel(dwc->regs, DWC3_GCTL, reg); @@ -222,7 +235,7 @@ static void __dwc3_set_mode(struct work_struct *work) spin_lock_irqsave(&dwc->lock, flags); - dwc3_set_prtcap(dwc, desired_dr_role); + dwc3_set_prtcap(dwc, desired_dr_role, false); spin_unlock_irqrestore(&dwc->lock, flags); @@ -759,16 +772,7 @@ static int dwc3_phy_setup(struct dwc3 *dwc) */ reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX; - /* - * Above DWC_usb3.0 1.94a, it is recommended to set - * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration. - * So default value will be '0' when the core is reset. Application - * needs to set it to '1' after the core initialization is completed. - * - * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be - * cleared after power-on reset, and it can be set after core - * initialization. - */ + /* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */ reg &= ~DWC3_GUSB3PIPECTL_SUSPHY; if (dwc->u2ss_inp3_quirk) @@ -841,15 +845,7 @@ static int dwc3_phy_setup(struct dwc3 *dwc) break; } - /* - * Above DWC_usb3.0 1.94a, it is recommended to set - * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration. - * So default value will be '0' when the core is reset. Application - * needs to set it to '1' after the core initialization is completed. - * - * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared - * after power-on reset, and it can be set after core initialization. - */ + /* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; if (dwc->dis_enblslpm_quirk) @@ -889,6 +885,25 @@ static int dwc3_clk_enable(struct dwc3 *dwc) if (ret) goto disable_utmi_clk; + /* + * Above DWC_usb3.0 1.94a, it is recommended to set + * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during + * coreConsultant configuration. So default value will be '0' when the + * core is reset. 
Application needs to set it to '1' after the core + * initialization is completed. + * + * Certain phy requires to be in P0 power state during initialization. + * Make sure GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear + * prior to phy init to maintain in the P0 state. + * + * After phy initialization, some phy operations can only be executed + * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and + * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid + * blocking phy ops. + */ + if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) + dwc3_enable_susphy(dwc, true); + return 0; disable_utmi_clk: @@ -1508,7 +1523,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) switch (dwc->dr_mode) { case USB_DR_MODE_PERIPHERAL: - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false); if (dwc->usb2_phy) otg_set_vbus(dwc->usb2_phy->otg, false); @@ -1520,7 +1535,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc) return dev_err_probe(dev, ret, "failed to initialize gadget\n"); break; case USB_DR_MODE_HOST: - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false); if (dwc->usb2_phy) otg_set_vbus(dwc->usb2_phy->otg, true); @@ -1563,7 +1578,7 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc) } /* de-assert DRVVBUS for HOST and OTG mode */ - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); } static void dwc3_get_properties(struct dwc3 *dwc) @@ -1581,8 +1596,6 @@ static void dwc3_get_properties(struct dwc3 *dwc) u8 tx_thr_num_pkt_prd = 0; u8 tx_max_burst_prd = 0; u8 tx_fifo_resize_max_num; - const char *usb_psy_name; - int ret; /* default to highest possible threshold */ lpm_nyet_threshold = 0xf; @@ -1615,13 +1628,6 @@ static void dwc3_get_properties(struct dwc3 *dwc) else dwc->sysdev = dwc->dev; - ret = device_property_read_string(dev, "usb-psy-name", &usb_psy_name); - if (ret >= 0) { - dwc->usb_psy = power_supply_get_by_name(usb_psy_name); - if (!dwc->usb_psy) - dev_err(dev, "couldn't get usb power supply\n"); - } - dwc->has_lpm_erratum = device_property_read_bool(dev, "snps,has-lpm-erratum"); device_property_read_u8(dev, "snps,lpm-nyet-threshold", @@ -1737,8 +1743,6 @@ static void dwc3_get_properties(struct dwc3 *dwc) dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd; dwc->tx_max_burst_prd = tx_max_burst_prd; - dwc->imod_interval = 0; - dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num; } @@ -1756,21 +1760,19 @@ static void dwc3_check_params(struct dwc3 *dwc) unsigned int hwparam_gen = DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3); - /* Check for proper value of imod_interval */ - if (dwc->imod_interval && !dwc3_has_imod(dwc)) { - dev_warn(dwc->dev, "Interrupt moderation not supported\n"); - dwc->imod_interval = 0; - } - /* + * Enable IMOD for all supporting controllers. + * + * Particularly, DWC_usb3 v3.00a must enable this feature for + * the following reason: + * * Workaround for STAR 9000961433 which affects only version * 3.00a of the DWC_usb3 core. This prevents the controller * interrupt from being masked while handling events. IMOD * allows us to work around this issue. Enable it for the * affected version. 
*/ - if (!dwc->imod_interval && - DWC3_VER_IS(DWC3, 300A)) + if (dwc3_has_imod((dwc))) dwc->imod_interval = 1; /* Check the maximum_speed parameter */ @@ -1973,6 +1975,23 @@ static int dwc3_get_clocks(struct dwc3 *dwc) return 0; } +static struct power_supply *dwc3_get_usb_power_supply(struct dwc3 *dwc) +{ + struct power_supply *usb_psy; + const char *usb_psy_name; + int ret; + + ret = device_property_read_string(dwc->dev, "usb-psy-name", &usb_psy_name); + if (ret < 0) + return NULL; + + usb_psy = power_supply_get_by_name(usb_psy_name); + if (!usb_psy) + return ERR_PTR(-EPROBE_DEFER); + + return usb_psy; +} + static int dwc3_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -2017,6 +2036,10 @@ static int dwc3_probe(struct platform_device *pdev) dwc3_get_properties(dwc); + dwc->usb_psy = dwc3_get_usb_power_supply(dwc); + if (IS_ERR(dwc->usb_psy)) + return dev_err_probe(dev, PTR_ERR(dwc->usb_psy), "couldn't get usb power supply\n"); + dwc->reset = devm_reset_control_array_get_optional_shared(dev); if (IS_ERR(dwc->reset)) { ret = PTR_ERR(dwc->reset); @@ -2299,7 +2322,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) if (ret) return ret; - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true); dwc3_gadget_resume(dwc); break; case DWC3_GCTL_PRTCAP_HOST: @@ -2307,7 +2330,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) ret = dwc3_core_init_for_resume(dwc); if (ret) return ret; - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true); break; } /* Restore GUSB2PHYCFG bits that were modified in suspend */ @@ -2332,7 +2355,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) if (ret) return ret; - dwc3_set_prtcap(dwc, dwc->current_dr_role); + dwc3_set_prtcap(dwc, dwc->current_dr_role, true); dwc3_otg_init(dwc); if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) { diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 3d63f70e2645..14701c73581a 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -450,6 +450,7 @@ #define DWC3_DCTL_TRGTULST_SS_INACT (DWC3_DCTL_TRGTULST(6)) /* These apply for core versions 1.94a and later */ +#define DWC3_DCTL_NYET_THRES_MASK (0xf << 20) #define DWC3_DCTL_NYET_THRES(n) (((n) & 0xf) << 20) #define DWC3_DCTL_KEEP_CONNECT BIT(19) @@ -747,6 +748,7 @@ struct dwc3_ep { #define DWC3_EP_PENDING_CLEAR_STALL BIT(11) #define DWC3_EP_TXFIFO_RESIZED BIT(12) #define DWC3_EP_DELAY_STOP BIT(13) +#define DWC3_EP_RESOURCE_ALLOCATED BIT(14) /* This last one is specific to EP0 */ #define DWC3_EP0_DIR_IN BIT(31) @@ -1532,7 +1534,7 @@ struct dwc3_gadget_ep_cmd_params { #define DWC3_HAS_OTG BIT(3) /* prototypes */ -void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode); +void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy); void dwc3_set_mode(struct dwc3 *dwc, u32 mode); u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type); diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c index 3bf470032e1f..316651ab4b39 100644 --- a/drivers/usb/dwc3/drd.c +++ b/drivers/usb/dwc3/drd.c @@ -173,7 +173,7 @@ void dwc3_otg_init(struct dwc3 *dwc) * block "Initialize GCTL for OTG operation". 
*/ /* GCTL.PrtCapDir=2'b11 */ - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true); /* GUSB2PHYCFG0.SusPHY=0 */ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; @@ -580,7 +580,7 @@ int dwc3_drd_init(struct dwc3 *dwc) dwc3_drd_update(dwc); } else { - dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG); + dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true); /* use OTG block to get ID event */ irq = dwc3_otg_get_irq(dwc); diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c index ad8a2eadb472..b0bb8edbfef7 100644 --- a/drivers/usb/dwc3/dwc3-am62.c +++ b/drivers/usb/dwc3/dwc3-am62.c @@ -146,6 +146,7 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62) if (ret) return ret; + of_node_put(args.np); am62->offset = args.args[0]; ret = regmap_update_bits(am62->syscon, am62->offset, PHY_PLL_REFCLK_MASK, am62->rate_code); @@ -263,6 +264,7 @@ static int dwc3_ti_remove(struct platform_device *pdev) pm_runtime_put_sync(dev); pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); pm_runtime_set_suspended(dev); platform_set_drvdata(pdev, NULL); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 6110ab1f9131..e2401cc4f155 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -143,11 +143,21 @@ static const struct property_entry dwc3_pci_intel_byt_properties[] = { {} }; +/* + * Intel Merrifield SoC uses these endpoints for tracing and they cannot + * be re-allocated if being used because the side band flow control signals + * are hard wired to certain endpoints: + * - 1 High BW Bulk IN (IN#1) (RTIT) + * - 1 1KB BW Bulk IN (IN#8) + 1 1KB BW Bulk OUT (Run Control) (OUT#8) + */ +static const u8 dwc3_pci_mrfld_reserved_endpoints[] = { 3, 16, 17 }; + static const struct property_entry dwc3_pci_mrfld_properties[] = { PROPERTY_ENTRY_STRING("dr_mode", "otg"), PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"), PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"), PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), + PROPERTY_ENTRY_U8_ARRAY("snps,reserved-endpoints", dwc3_pci_mrfld_reserved_endpoints), PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"), PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), {} diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c index 0745e9f11b2e..8070c55c2170 100644 --- a/drivers/usb/dwc3/dwc3-xilinx.c +++ b/drivers/usb/dwc3/dwc3-xilinx.c @@ -122,8 +122,11 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data) * in use but the usb3-phy entry is missing from the device tree. * Therefore, skip these operations in this case. */ - if (!priv_data->usb3_phy) + if (!priv_data->usb3_phy) { + /* Deselect the PIPE Clock Select bit in FPD PIPE Clock register */ + writel(PIPE_CLK_DESELECT, priv_data->regs + XLNX_USB_FPD_PIPE_CLK); goto skip_usb3_phy; + } crst = devm_reset_control_get_exclusive(dev, "usb_crst"); if (IS_ERR(crst)) { @@ -205,15 +208,13 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data) skip_usb3_phy: /* ulpi reset via gpio-modepin or gpio-framework driver */ - reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(reset_gpio)) { return dev_err_probe(dev, PTR_ERR(reset_gpio), "Failed to request reset GPIO\n"); } if (reset_gpio) { - /* Toggle ulpi to reset the phy. 
*/ - gpiod_set_value_cansleep(reset_gpio, 1); usleep_range(5000, 10000); gpiod_set_value_cansleep(reset_gpio, 0); usleep_range(5000, 10000); diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index cab8c7a95c30..81f3538eba01 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -224,12 +224,14 @@ void dwc3_ep0_stall_and_restart(struct dwc3 *dwc) /* reinitialize physical ep1 */ dep = dwc->eps[1]; - dep->flags = DWC3_EP_ENABLED; + dep->flags &= DWC3_EP_RESOURCE_ALLOCATED; + dep->flags |= DWC3_EP_ENABLED; /* stall is always issued on EP0 */ dep = dwc->eps[0]; __dwc3_gadget_ep_set_halt(dep, 1, false); - dep->flags = DWC3_EP_ENABLED; + dep->flags &= DWC3_EP_RESOURCE_ALLOCATED | DWC3_EP_TRANSFER_STARTED; + dep->flags |= DWC3_EP_ENABLED; dwc->delayed_status = false; if (!list_empty(&dep->pending_list)) { @@ -654,6 +656,7 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) return -EINVAL; case USB_STATE_ADDRESS: + dwc3_gadget_start_config(dwc, 2); dwc3_gadget_clear_tx_fifos(dwc); ret = dwc3_ep0_delegate_req(dwc, ctrl); diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 18e140a2e3d0..f8b2ca5d92fa 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -523,76 +523,61 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep) static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep) { struct dwc3_gadget_ep_cmd_params params; + int ret; + + if (dep->flags & DWC3_EP_RESOURCE_ALLOCATED) + return 0; memset(¶ms, 0x00, sizeof(params)); params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1); - return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE, + ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE, ¶ms); + if (ret) + return ret; + + dep->flags |= DWC3_EP_RESOURCE_ALLOCATED; + return 0; } /** - * dwc3_gadget_start_config - configure ep resources - * @dep: endpoint that is being enabled + * dwc3_gadget_start_config - reset endpoint resources + * @dwc: pointer to the DWC3 context + * @resource_index: DEPSTARTCFG.XferRscIdx value (must be 0 or 2) * - * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's - * completion, it will set Transfer Resource for all available endpoints. + * Set resource_index=0 to reset all endpoints' resources allocation. Do this as + * part of the power-on/soft-reset initialization. * - * The assignment of transfer resources cannot perfectly follow the data book - * due to the fact that the controller driver does not have all knowledge of the - * configuration in advance. It is given this information piecemeal by the - * composite gadget framework after every SET_CONFIGURATION and - * SET_INTERFACE. Trying to follow the databook programming model in this - * scenario can cause errors. For two reasons: - * - * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every - * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is - * incorrect in the scenario of multiple interfaces. - * - * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new - * endpoint on alt setting (8.1.6). - * - * The following simplified method is used instead: - * - * All hardware endpoints can be assigned a transfer resource and this setting - * will stay persistent until either a core reset or hibernation. So whenever we - * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do - * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are - * guaranteed that there are as many transfer resources as endpoints. 
- * - * This function is called for each endpoint when it is being enabled but is - * triggered only when called for EP0-out, which always happens first, and which - * should only happen in one of the above conditions. + * Set resource_index=2 to reset only non-control endpoints' resources. Do this + * on receiving the SET_CONFIGURATION request or hibernation resume. */ -static int dwc3_gadget_start_config(struct dwc3_ep *dep) +int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index) { struct dwc3_gadget_ep_cmd_params params; - struct dwc3 *dwc; + struct dwc3_ep *dep; u32 cmd; int i; int ret; - if (dep->number) - return 0; + if (resource_index != 0 && resource_index != 2) + return -EINVAL; memset(¶ms, 0x00, sizeof(params)); cmd = DWC3_DEPCMD_DEPSTARTCFG; - dwc = dep->dwc; + cmd |= DWC3_DEPCMD_PARAM(resource_index); - ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); + ret = dwc3_send_gadget_ep_cmd(dwc->eps[0], cmd, ¶ms); if (ret) return ret; - for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { - struct dwc3_ep *dep = dwc->eps[i]; - + /* Reset resource allocation flags */ + for (i = resource_index; i < dwc->num_eps; i++) { + dep = dwc->eps[i]; if (!dep) continue; - ret = dwc3_gadget_set_xfer_resource(dep); - if (ret) - return ret; + dep->flags &= ~DWC3_EP_RESOURCE_ALLOCATED; } return 0; @@ -785,9 +770,11 @@ void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc) dwc->last_fifo_depth = fifo_depth; /* Clear existing TXFIFO for all IN eps except ep0 */ - for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM); - num += 2) { + for (num = 3; num < min_t(int, dwc->num_eps, DWC3_ENDPOINTS_NUM); num += 2) { dep = dwc->eps[num]; + if (!dep) + continue; + /* Don't change TXFRAMNUM on usb31 version */ size = DWC3_IP_IS(DWC3) ? 0 : dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1)) & @@ -1029,16 +1016,18 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action) ret = dwc3_gadget_resize_tx_fifos(dep); if (ret) return ret; - - ret = dwc3_gadget_start_config(dep); - if (ret) - return ret; } ret = dwc3_gadget_set_ep_config(dep, action); if (ret) return ret; + if (!(dep->flags & DWC3_EP_RESOURCE_ALLOCATED)) { + ret = dwc3_gadget_set_xfer_resource(dep); + if (ret) + return ret; + } + if (!(dep->flags & DWC3_EP_ENABLED)) { struct dwc3_trb *trb_st_hw; struct dwc3_trb *trb_link; @@ -1192,7 +1181,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) dep->stream_capable = false; dep->type = 0; - mask = DWC3_EP_TXFIFO_RESIZED; + mask = DWC3_EP_TXFIFO_RESIZED | DWC3_EP_RESOURCE_ALLOCATED; /* * dwc3_remove_requests() can exit early if DWC3 EP delayed stop is * set. Do not clear DEP flags, so that the end transfer command will @@ -1344,11 +1333,14 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) * pending to be processed by the driver. */ if (dep->trb_enqueue == dep->trb_dequeue) { + struct dwc3_request *req; + /* - * If there is any request remained in the started_list at - * this point, that means there is no TRB available. + * If there is any request remained in the started_list with + * active TRBs at this point, then there is no TRB available. 
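With this rework, dwc3_gadget_set_xfer_resource() becomes idempotent: a per-endpoint flag records that the transfer resource was already assigned, and dwc3_gadget_start_config() clears those flags when it resets the controller-side allocation. A hedged sketch of that allocate-once, reset-by-clearing-the-flag pattern; all example_* names are invented:

    #define EP_RESOURCE_ALLOCATED  BIT(0)

    static int example_alloc_ep_resource(struct example_ep *ep)
    {
        int ret;

        if (ep->flags & EP_RESOURCE_ALLOCATED)
            return 0;                      /* already done, nothing to do */

        ret = example_send_alloc_cmd(ep);  /* hardware command, may fail */
        if (ret)
            return ret;

        ep->flags |= EP_RESOURCE_ALLOCATED;
        return 0;
    }

    static void example_reset_ep_resources(struct example_dev *dev)
    {
        unsigned int i;

        /* after a controller-level reset the hardware side is gone too */
        for (i = 0; i < dev->num_eps; i++) {
            if (!dev->eps[i])
                continue;
            dev->eps[i]->flags &= ~EP_RESOURCE_ALLOCATED;
        }
    }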
*/ - if (!list_empty(&dep->started_list)) + req = next_request(&dep->started_list); + if (req && req->num_trbs) return 0; return DWC3_TRB_NUM - 1; @@ -1581,8 +1573,8 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep, struct scatterlist *s; int i; unsigned int length = req->request.length; - unsigned int remaining = req->request.num_mapped_sgs - - req->num_queued_sgs; + unsigned int remaining = req->num_pending_sgs; + unsigned int num_queued_sgs = req->request.num_mapped_sgs - remaining; unsigned int num_trbs = req->num_trbs; bool needs_extra_trb = dwc3_needs_extra_trb(dep, req); @@ -1590,7 +1582,7 @@ static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep, * If we resume preparing the request, then get the remaining length of * the request and resume where we left off. */ - for_each_sg(req->request.sg, s, req->num_queued_sgs, i) + for_each_sg(req->request.sg, s, num_queued_sgs, i) length -= sg_dma_len(s); for_each_sg(sg, s, remaining, i) { @@ -2654,10 +2646,38 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on) { u32 reg; u32 timeout = 2000; + u32 saved_config = 0; if (pm_runtime_suspended(dwc->dev)) return 0; + /* + * When operating in USB 2.0 speeds (HS/FS), ensure that + * GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY are cleared before starting + * or stopping the controller. This resolves timeout issues that occur + * during frequent role switches between host and device modes. + * + * Save and clear these settings, then restore them after completing the + * controller start or stop sequence. + * + * This solution was discovered through experimentation as it is not + * mentioned in the dwc3 programming guide. It has been tested on an + * Exynos platforms. + */ + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + if (reg & DWC3_GUSB2PHYCFG_SUSPHY) { + saved_config |= DWC3_GUSB2PHYCFG_SUSPHY; + reg &= ~DWC3_GUSB2PHYCFG_SUSPHY; + } + + if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) { + saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM; + reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM; + } + + if (saved_config) + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); + reg = dwc3_readl(dwc->regs, DWC3_DCTL); if (is_on) { if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) { @@ -2685,6 +2705,12 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on) reg &= DWC3_DSTS_DEVCTRLHLT; } while (--timeout && !(!is_on ^ !reg)); + if (saved_config) { + reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); + reg |= saved_config; + dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); + } + if (!timeout) return -ETIMEDOUT; @@ -2970,6 +2996,12 @@ static int __dwc3_gadget_start(struct dwc3 *dwc) /* Start with SuperSpeed Default */ dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); + ret = dwc3_gadget_start_config(dwc, 0); + if (ret) { + dev_err(dwc->dev, "failed to config endpoints\n"); + return ret; + } + dep = dwc->eps[0]; dep->flags = 0; ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT); @@ -3746,6 +3778,8 @@ out: for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) { dep = dwc->eps[i]; + if (!dep) + continue; if (!(dep->flags & DWC3_EP_ENABLED)) continue; @@ -3934,6 +3968,10 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, u8 epnum = event->endpoint_number; dep = dwc->eps[epnum]; + if (!dep) { + dev_warn(dwc->dev, "spurious event, endpoint %u is not allocated\n", epnum); + return; + } if (!(dep->flags & DWC3_EP_ENABLED)) { if ((epnum > 1) && !(dep->flags & DWC3_EP_TRANSFER_STARTED)) @@ -4311,8 +4349,10 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) WARN_ONCE(DWC3_VER_IS_PRIOR(DWC3, 240A) && 
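The run/stop change saves whichever of the two PHY suspend bits were set, clears them for the duration of the start/stop sequence, and afterwards restores exactly the bits that were saved. A small userspace model of that save/clear/restore idiom; the bit positions are illustrative and the register is simulated with a plain variable:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_SUSPHY    (1u << 6)
    #define BIT_ENBLSLPM  (1u << 8)

    int main(void)
    {
        uint32_t reg = BIT_SUSPHY | 0x13;   /* SUSPHY set, ENBLSLPM clear */
        uint32_t saved = reg & (BIT_SUSPHY | BIT_ENBLSLPM);

        reg &= ~(BIT_SUSPHY | BIT_ENBLSLPM);    /* clear for the operation */
        /* ... perform the start/stop sequence here ... */
        reg |= saved;                           /* put back only what was set */

        /* prints 0x00000053: SUSPHY came back, ENBLSLPM stayed clear */
        printf("0x%08x\n", (unsigned int)reg);
        return 0;
    }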
dwc->has_lpm_erratum, "LPM Erratum not available on dwc3 revisions < 2.40a\n"); - if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A)) + if (dwc->has_lpm_erratum && !DWC3_VER_IS_PRIOR(DWC3, 240A)) { + reg &= ~DWC3_DCTL_NYET_THRES_MASK; reg |= DWC3_DCTL_NYET_THRES(dwc->lpm_nyet_threshold); + } dwc3_gadget_dctl_write_safe(dwc, reg); } else { @@ -4567,14 +4607,18 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), DWC3_GEVNTSIZ_SIZE(evt->length)); + evt->flags &= ~DWC3_EVENT_PENDING; + /* + * Add an explicit write memory barrier to make sure that the update of + * clearing DWC3_EVENT_PENDING is observed in dwc3_check_event_buf() + */ + wmb(); + if (dwc->imod_interval) { dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB); dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); } - /* Keep the clearing of DWC3_EVENT_PENDING at the end */ - evt->flags &= ~DWC3_EVENT_PENDING; - return ret; } @@ -4626,6 +4670,12 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) if (!count) return IRQ_NONE; + if (count > evt->length) { + dev_err_ratelimited(dwc->dev, "invalid count(%u) > evt->length(%u)\n", + count, evt->length); + return IRQ_NONE; + } + evt->count = count; evt->flags |= DWC3_EVENT_PENDING; diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index 242426b67798..e87dda66ac17 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h @@ -135,6 +135,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol); void dwc3_ep0_send_delayed_status(struct dwc3 *dwc); void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt); +int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index); /** * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index f14ad65acad7..e13cd7e70b90 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1018,10 +1018,11 @@ static int set_config(struct usb_composite_dev *cdev, else usb_gadget_set_remote_wakeup(gadget, 0); done: - if (power <= USB_SELF_POWER_VBUS_MAX_DRAW) - usb_gadget_set_selfpowered(gadget); - else + if (power > USB_SELF_POWER_VBUS_MAX_DRAW || + (c && !(c->bmAttributes & USB_CONFIG_ATT_SELFPOWER))) usb_gadget_clear_selfpowered(gadget); + else + usb_gadget_set_selfpowered(gadget); usb_gadget_vbus_draw(gadget, power); if (result >= 0 && cdev->delayed_status) @@ -2493,7 +2494,10 @@ void composite_suspend(struct usb_gadget *gadget) cdev->suspended = 1; - usb_gadget_set_selfpowered(gadget); + if (cdev->config && + cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER) + usb_gadget_set_selfpowered(gadget); + usb_gadget_vbus_draw(gadget, 2); } @@ -2522,8 +2526,11 @@ void composite_resume(struct usb_gadget *gadget) else maxpower = min(maxpower, 900U); - if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW) + if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW || + !(cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER)) usb_gadget_clear_selfpowered(gadget); + else + usb_gadget_set_selfpowered(gadget); usb_gadget_vbus_draw(gadget, maxpower); } diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 1165902e0b15..679d28742fe7 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1868,7 +1868,7 @@ static int functionfs_bind(struct 
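The new check in dwc3_check_event_buf() treats the hardware-reported event count as untrusted input: a value larger than the event buffer would otherwise let the handler walk past the end of it. The defensive shape, as a short hedged fragment; EVT_COUNT_REG, EVT_COUNT_MASK, dev and evt are placeholders:

    count = readl(dev->regs + EVT_COUNT_REG) & EVT_COUNT_MASK;
    if (!count)
        return IRQ_NONE;                     /* nothing to do */

    if (count > evt->length) {
        dev_err_ratelimited(dev->dev,
                            "invalid event count %u > buffer %u\n",
                            count, evt->length);
        return IRQ_NONE;                     /* don't walk past the buffer */
    }

    /* safe: process 'count' bytes of events from evt->buf */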
ffs_data *ffs, struct usb_composite_dev *cdev) ENTER(); - if (WARN_ON(ffs->state != FFS_ACTIVE + if ((ffs->state != FFS_ACTIVE || test_and_set_bit(FFS_FL_BOUND, &ffs->flags))) return -EBADFD; diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 2d02f25f9597..0f6b32e4e5be 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -282,7 +282,7 @@ f_midi_complete(struct usb_ep *ep, struct usb_request *req) /* Our transmit completed. See if there's more to go. * f_midi_transmit eats req, don't queue it again. */ req->length = 0; - f_midi_transmit(midi); + queue_work(system_highpri_wq, &midi->work); return; } break; @@ -999,11 +999,11 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f) } /* configure the endpoint descriptors ... */ - ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports); - ms_out_desc.bNumEmbMIDIJack = midi->in_ports; + ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports); + ms_out_desc.bNumEmbMIDIJack = midi->out_ports; - ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports); - ms_in_desc.bNumEmbMIDIJack = midi->out_ports; + ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports); + ms_in_desc.bNumEmbMIDIJack = midi->in_ports; /* ... and add them to the list */ endpoint_descriptor_index = i; diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index c21acebe8aae..3c9541357c24 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -245,7 +245,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd) { struct f_uas *fu = cmd->fu; struct se_cmd *se_cmd = &cmd->se_cmd; - struct usb_gadget *gadget = fuas_to_gadget(fu); int ret; init_completion(&cmd->write_complete); @@ -256,22 +255,6 @@ static int bot_send_write_request(struct usbg_cmd *cmd) return -EINVAL; } - if (!gadget->sg_supported) { - cmd->data_buf = kmalloc(se_cmd->data_length, GFP_KERNEL); - if (!cmd->data_buf) - return -ENOMEM; - - fu->bot_req_out->buf = cmd->data_buf; - } else { - fu->bot_req_out->buf = NULL; - fu->bot_req_out->num_sgs = se_cmd->t_data_nents; - fu->bot_req_out->sg = se_cmd->t_data_sg; - } - - fu->bot_req_out->complete = usbg_data_write_cmpl; - fu->bot_req_out->length = se_cmd->data_length; - fu->bot_req_out->context = cmd; - ret = usbg_prepare_w_request(cmd, fu->bot_req_out); if (ret) goto cleanup; @@ -973,6 +956,7 @@ static void usbg_data_write_cmpl(struct usb_ep *ep, struct usb_request *req) return; cleanup: + target_put_sess_cmd(se_cmd); transport_generic_free_cmd(&cmd->se_cmd, 0); } @@ -1065,8 +1049,7 @@ static void usbg_cmd_work(struct work_struct *work) out: transport_send_check_condition_and_sense(se_cmd, - TCM_UNSUPPORTED_SCSI_OPCODE, 1); - transport_generic_free_cmd(&cmd->se_cmd, 0); + TCM_UNSUPPORTED_SCSI_OPCODE, 0); } static struct usbg_cmd *usbg_get_cmd(struct f_uas *fu, @@ -1194,8 +1177,7 @@ static void bot_cmd_work(struct work_struct *work) out: transport_send_check_condition_and_sense(se_cmd, - TCM_UNSUPPORTED_SCSI_OPCODE, 1); - transport_generic_free_cmd(&cmd->se_cmd, 0); + TCM_UNSUPPORTED_SCSI_OPCODE, 0); } static int bot_submit_command(struct f_uas *fu, @@ -1999,43 +1981,39 @@ static int tcm_bind(struct usb_configuration *c, struct usb_function *f) bot_intf_desc.bInterfaceNumber = iface; uasp_intf_desc.bInterfaceNumber = iface; fu->iface = iface; - ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bi_desc, - &uasp_bi_ep_comp_desc); + ep = usb_ep_autoconfig(gadget, &uasp_fs_bi_desc); 
if (!ep) goto ep_fail; fu->ep_in = ep; - ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_bo_desc, - &uasp_bo_ep_comp_desc); + ep = usb_ep_autoconfig(gadget, &uasp_fs_bo_desc); if (!ep) goto ep_fail; fu->ep_out = ep; - ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_status_desc, - &uasp_status_in_ep_comp_desc); + ep = usb_ep_autoconfig(gadget, &uasp_fs_status_desc); if (!ep) goto ep_fail; fu->ep_status = ep; - ep = usb_ep_autoconfig_ss(gadget, &uasp_ss_cmd_desc, - &uasp_cmd_comp_desc); + ep = usb_ep_autoconfig(gadget, &uasp_fs_cmd_desc); if (!ep) goto ep_fail; fu->ep_cmd = ep; /* Assume endpoint addresses are the same for both speeds */ - uasp_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress; - uasp_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress; + uasp_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress; + uasp_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress; uasp_status_desc.bEndpointAddress = - uasp_ss_status_desc.bEndpointAddress; - uasp_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress; + uasp_fs_status_desc.bEndpointAddress; + uasp_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress; - uasp_fs_bi_desc.bEndpointAddress = uasp_ss_bi_desc.bEndpointAddress; - uasp_fs_bo_desc.bEndpointAddress = uasp_ss_bo_desc.bEndpointAddress; - uasp_fs_status_desc.bEndpointAddress = - uasp_ss_status_desc.bEndpointAddress; - uasp_fs_cmd_desc.bEndpointAddress = uasp_ss_cmd_desc.bEndpointAddress; + uasp_ss_bi_desc.bEndpointAddress = uasp_fs_bi_desc.bEndpointAddress; + uasp_ss_bo_desc.bEndpointAddress = uasp_fs_bo_desc.bEndpointAddress; + uasp_ss_status_desc.bEndpointAddress = + uasp_fs_status_desc.bEndpointAddress; + uasp_ss_cmd_desc.bEndpointAddress = uasp_fs_cmd_desc.bEndpointAddress; ret = usb_assign_descriptors(f, uasp_fs_function_desc, uasp_hs_function_desc, uasp_ss_function_desc, @@ -2079,9 +2057,14 @@ static void tcm_delayed_set_alt(struct work_struct *wq) static int tcm_get_alt(struct usb_function *f, unsigned intf) { - if (intf == bot_intf_desc.bInterfaceNumber) + struct f_uas *fu = to_f_uas(f); + + if (fu->iface != intf) + return -EOPNOTSUPP; + + if (fu->flags & USBG_IS_BOT) return USB_G_ALT_INT_BBB; - if (intf == uasp_intf_desc.bInterfaceNumber) + else if (fu->flags & USBG_IS_UAS) return USB_G_ALT_INT_UAS; return -EOPNOTSUPP; @@ -2091,6 +2074,9 @@ static int tcm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_uas *fu = to_f_uas(f); + if (fu->iface != intf) + return -EOPNOTSUPP; + if ((alt == USB_G_ALT_INT_BBB) || (alt == USB_G_ALT_INT_UAS)) { struct guas_setup_wq *work; diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index b3dc5f5164f4..eabd10817e1e 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -1176,6 +1176,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) uac2->as_in_alt = 0; } + std_ac_if_desc.bNumEndpoints = 0; if (FUOUT_EN(uac2_opts) || FUIN_EN(uac2_opts)) { uac2->int_ep = usb_ep_autoconfig(gadget, &fs_ep_int_desc); if (!uac2->int_ep) { diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 1f143c6ba86b..8802dd53fbea 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -570,9 +570,12 @@ static int gs_start_io(struct gs_port *port) * we didn't in gs_start_tx() */ tty_wakeup(port->port.tty); } else { - gs_free_requests(ep, head, &port->read_allocated); - gs_free_requests(port->port_usb->in, &port->write_pool, - 
&port->write_allocated); + /* Free reqs only if we are still connected */ + if (port->port_usb) { + gs_free_requests(ep, head, &port->read_allocated); + gs_free_requests(port->port_usb->in, &port->write_pool, + &port->write_allocated); + } status = -EIO; } diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c index 7f257d18c2d2..18e40e4e94ae 100644 --- a/drivers/usb/gadget/function/uvc_v4l2.c +++ b/drivers/usb/gadget/function/uvc_v4l2.c @@ -140,6 +140,9 @@ static struct uvcg_format *find_format_by_pix(struct uvc_device *uvc, list_for_each_entry(format, &uvc->header->formats, entry) { struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt); + if (IS_ERR(fmtdesc)) + continue; + if (fmtdesc->fcc == pixelformat) { uformat = format->fmt; break; @@ -260,6 +263,7 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt) struct uvc_video *video = &uvc->video; struct uvcg_format *uformat; struct uvcg_frame *uframe; + const struct uvc_format_desc *fmtdesc; u8 *fcc; if (fmt->type != video->queue.queue.type) @@ -285,7 +289,10 @@ uvc_v4l2_try_format(struct file *file, void *fh, struct v4l2_format *fmt) fmt->fmt.pix.field = V4L2_FIELD_NONE; fmt->fmt.pix.bytesperline = uvc_v4l2_get_bytesperline(uformat, uframe); fmt->fmt.pix.sizeimage = uvc_get_frame_size(uformat, uframe); - fmt->fmt.pix.pixelformat = to_uvc_format(uformat)->fcc; + fmtdesc = to_uvc_format(uformat); + if (IS_ERR(fmtdesc)) + return PTR_ERR(fmtdesc); + fmt->fmt.pix.pixelformat = fmtdesc->fcc; fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; fmt->fmt.pix.priv = 0; @@ -398,6 +405,9 @@ uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f) f->flags |= V4L2_FMT_FLAG_COMPRESSED; fmtdesc = to_uvc_format(uformat); + if (IS_ERR(fmtdesc)) + return PTR_ERR(fmtdesc); + f->pixelformat = fmtdesc->fcc; strscpy(f->description, fmtdesc->name, sizeof(f->description)); diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c index 4f3bc27c1c62..73664a123c7a 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c @@ -549,6 +549,9 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx) d->vhub = vhub; d->index = idx; d->name = devm_kasprintf(parent, GFP_KERNEL, "port%d", idx+1); + if (!d->name) + return -ENOMEM; + d->regs = vhub->regs + 0x100 + 0x10 * idx; ast_vhub_init_ep0(vhub, &d->ep0, d); diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 68af1ccea94b..00e678c5a75d 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1422,8 +1422,16 @@ int usb_add_gadget(struct usb_gadget *gadget) if (ret) goto err_free_id; + ret = sysfs_create_link(&udc->dev.kobj, + &gadget->dev.kobj, "gadget"); + if (ret) + goto err_del_gadget; + return 0; + err_del_gadget: + device_del(&gadget->dev); + err_free_id: ida_free(&gadget_id_numbers, gadget->id_number); @@ -1532,8 +1540,9 @@ void usb_del_gadget(struct usb_gadget *gadget) mutex_unlock(&udc_lock); kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE); - flush_work(&gadget->work); + sysfs_remove_link(&udc->dev.kobj, "gadget"); device_del(&gadget->dev); + flush_work(&gadget->work); ida_free(&gadget_id_numbers, gadget->id_number); cancel_work_sync(&udc->vbus_work); device_unregister(&udc->dev); diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 32c9e369216c..0bbdf82f48cd 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c 
@@ -309,7 +309,7 @@ struct renesas_usb3_request { struct list_head queue; }; -#define USB3_EP_NAME_SIZE 8 +#define USB3_EP_NAME_SIZE 16 struct renesas_usb3_ep { struct usb_ep ep; struct renesas_usb3 *usb3; diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index fd7a9535973e..771466cdf385 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -1743,6 +1743,10 @@ static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep) val = xudc_readl(xudc, CTRL); val &= ~CTRL_RUN; xudc_writel(xudc, val, CTRL); + + val = xudc_readl(xudc, ST); + if (val & ST_RC) + xudc_writel(xudc, ST_RC, ST); } dev_info(xudc->dev, "ep %u disabled\n", ep->index); diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c index c25c51d26f26..395913113686 100644 --- a/drivers/usb/host/ehci-sh.c +++ b/drivers/usb/host/ehci-sh.c @@ -120,8 +120,12 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev) if (IS_ERR(priv->iclk)) priv->iclk = NULL; - clk_enable(priv->fclk); - clk_enable(priv->iclk); + ret = clk_enable(priv->fclk); + if (ret) + goto fail_request_resource; + ret = clk_enable(priv->iclk); + if (ret) + goto fail_iclk; ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret != 0) { @@ -137,6 +141,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev) fail_add_hcd: clk_disable(priv->iclk); +fail_iclk: clk_disable(priv->fclk); fail_request_resource: diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c index c4ddd1022f60..8dff84e5c41b 100644 --- a/drivers/usb/host/ehci-spear.c +++ b/drivers/usb/host/ehci-spear.c @@ -106,7 +106,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev) /* registers start at offset 0x0 */ hcd_to_ehci(hcd)->caps = hcd->regs; - clk_prepare_enable(sehci->clk); + retval = clk_prepare_enable(sehci->clk); + if (retval) + goto err_put_hcd; retval = usb_add_hcd(hcd, irq, IRQF_SHARED); if (retval) goto err_stop_ehci; @@ -131,8 +133,7 @@ static int spear_ehci_hcd_drv_remove(struct platform_device *pdev) usb_remove_hcd(hcd); - if (sehci->clk) - clk_disable_unprepare(sehci->clk); + clk_disable_unprepare(sehci->clk); usb_put_hcd(hcd); return 0; diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c index 19111e83ac13..8aaafba058aa 100644 --- a/drivers/usb/host/max3421-hcd.c +++ b/drivers/usb/host/max3421-hcd.c @@ -785,11 +785,17 @@ max3421_check_unlink(struct usb_hcd *hcd) retval = 1; dev_dbg(&spi->dev, "%s: URB %p unlinked=%d", __func__, urb, urb->unlinked); - usb_hcd_unlink_urb_from_ep(hcd, urb); - spin_unlock_irqrestore(&max3421_hcd->lock, - flags); - usb_hcd_giveback_urb(hcd, urb, 0); - spin_lock_irqsave(&max3421_hcd->lock, flags); + if (urb == max3421_hcd->curr_urb) { + max3421_hcd->urb_done = 1; + max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) | + BIT(MAX3421_HI_RCVDAV_BIT)); + } else { + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock_irqrestore(&max3421_hcd->lock, + flags); + usb_hcd_giveback_urb(hcd, urb, 0); + spin_lock_irqsave(&max3421_hcd->lock, flags); + } } } } @@ -1949,6 +1955,12 @@ max3421_remove(struct spi_device *spi) usb_put_hcd(hcd); } +static const struct spi_device_id max3421_spi_ids[] = { + { "max3421" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, max3421_spi_ids); + static const struct of_device_id max3421_of_match_table[] = { { .compatible = "maxim,max3421", }, {}, @@ -1958,6 +1970,7 @@ MODULE_DEVICE_TABLE(of, max3421_of_match_table); static struct spi_driver max3421_driver = { .probe = max3421_probe, .remove = max3421_remove, + 
.id_table = max3421_spi_ids, .driver = { .name = "max3421-hcd", .of_match_table = of_match_ptr(max3421_of_match_table), diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index d7b4f40f9ff4..747d6a0cbb60 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -165,6 +165,25 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) return 0; } +static int ohci_quirk_loongson(struct usb_hcd *hcd) +{ + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + /* + * Loongson's LS7A OHCI controller (rev 0x02) has a + * flaw. MMIO register with offset 0x60/64 is treated + * as legacy PS2-compatible keyboard/mouse interface. + * Since OHCI only uses 4KB of BAR resource, LS7A OHCI's + * 32KB BAR is wrapped around (the 2nd 4KB BAR space + * is the same as the 1st 4KB internally). So add 4KB + * offset (0x1000) to the OHCI registers as a quirk. + */ + if (pdev->revision == 0x2) + hcd->regs += SZ_4K; /* SZ_4K = 0x1000 */ + + return 0; +} + static int ohci_quirk_qemu(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); @@ -224,6 +243,10 @@ static const struct pci_device_id ohci_pci_quirks[] = { PCI_DEVICE(PCI_VENDOR_ID_ATI, 0x4399), .driver_data = (unsigned long)ohci_quirk_amd700, }, + { + PCI_DEVICE(PCI_VENDOR_ID_LOONGSON, 0x7a24), + .driver_data = (unsigned long)ohci_quirk_loongson, + }, { .vendor = PCI_VENDOR_ID_APPLE, .device = 0x003f, diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 2665832f9add..b96d9062a083 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -946,6 +946,15 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev) * booting from USB disk or using a usb keyboard */ hcc_params = readl(base + EHCI_HCC_PARAMS); + + /* LS7A EHCI controller doesn't have extended capabilities, the + * EECP (EHCI Extended Capabilities Pointer) field of HCCPARAMS + * register should be 0x0 but it reads as 0xa0. So clear it to + * avoid error messages on boot.
+ */ + if (pdev->vendor == PCI_VENDOR_ID_LOONGSON && pdev->device == 0x7a14) + hcc_params &= ~(0xffL << 8); + offset = (hcc_params >> 8) & 0xff; while (offset && --count) { pci_read_config_dword(pdev, offset, &cap); diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c index b2049b47a08d..53d2fbee76e2 100644 --- a/drivers/usb/host/uhci-platform.c +++ b/drivers/usb/host/uhci-platform.c @@ -122,7 +122,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) } /* Get and enable clock if any specified */ - uhci->clk = devm_clk_get(&pdev->dev, NULL); + uhci->clk = devm_clk_get_optional(&pdev->dev, NULL); if (IS_ERR(uhci->clk)) { ret = PTR_ERR(uhci->clk); goto err_rmr; diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index f1367b53b260..0f8f08f60ceb 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -158,16 +158,18 @@ static void xhci_dbc_giveback(struct dbc_request *req, int status) spin_lock(&dbc->lock); } -static void xhci_dbc_flush_single_request(struct dbc_request *req) +static void trb_to_noop(union xhci_trb *trb) { - union xhci_trb *trb = req->trb; - trb->generic.field[0] = 0; trb->generic.field[1] = 0; trb->generic.field[2] = 0; trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)); +} +static void xhci_dbc_flush_single_request(struct dbc_request *req) +{ + trb_to_noop(req->trb); xhci_dbc_giveback(req, -ESHUTDOWN); } @@ -637,7 +639,6 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc) case DS_DISABLED: return; case DS_CONFIGURED: - case DS_STALLED: if (dbc->driver->disconnect) dbc->driver->disconnect(dbc); break; @@ -657,6 +658,23 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc) } } +static void +handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted) +{ + if (halted) { + dev_info(dbc->dev, "DbC Endpoint halted\n"); + dep->halted = 1; + + } else if (dep->halted) { + dev_info(dbc->dev, "DbC Endpoint halt cleared\n"); + dep->halted = 0; + + if (!list_empty(&dep->list_pending)) + writel(DBC_DOOR_BELL_TARGET(dep->direction), + &dbc->regs->doorbell); + } +} + static void dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event) { @@ -685,6 +703,7 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event) struct xhci_ring *ring; int ep_id; int status; + struct xhci_ep_ctx *ep_ctx; u32 comp_code; size_t remain_length; struct dbc_request *req = NULL, *r; @@ -694,8 +713,30 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event) ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3])); dep = (ep_id == EPID_OUT) ? get_out_ep(dbc) : get_in_ep(dbc); + ep_ctx = (ep_id == EPID_OUT) ? 
+ dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc); ring = dep->ring; + /* Match the pending request: */ + list_for_each_entry(r, &dep->list_pending, list_pending) { + if (r->trb_dma == event->trans_event.buffer) { + req = r; + break; + } + if (r->status == -COMP_STALL_ERROR) { + dev_warn(dbc->dev, "Give back stale stalled req\n"); + ring->num_trbs_free++; + xhci_dbc_giveback(r, 0); + } + } + + if (!req) { + dev_warn(dbc->dev, "no matched request\n"); + return; + } + + trace_xhci_dbc_handle_transfer(ring, &req->trb->generic); + switch (comp_code) { case COMP_SUCCESS: remain_length = 0; @@ -706,31 +747,49 @@ static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event) case COMP_TRB_ERROR: case COMP_BABBLE_DETECTED_ERROR: case COMP_USB_TRANSACTION_ERROR: - case COMP_STALL_ERROR: dev_warn(dbc->dev, "tx error %d detected\n", comp_code); status = -comp_code; break; + case COMP_STALL_ERROR: + dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n", + event->trans_event.buffer, remain_length, ep_ctx->deq); + status = 0; + dep->halted = 1; + + /* + * xHC DbC may trigger a STALL bulk xfer event when host sends a + * ClearFeature(ENDPOINT_HALT) request even if there wasn't an + * active bulk transfer. + * + * Don't give back this transfer request as hardware will later + * start processing TRBs starting from this 'STALLED' TRB, + * causing TRBs and requests to be out of sync. + * + * If STALL event shows some bytes were transferred then assume + * it's an actual transfer issue and give back the request. + * In this case mark the TRB as No-Op to avoid hw from using the + * TRB again. + */ + + if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) { + dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n"); + if (remain_length == req->length) { + dev_dbg(dbc->dev, "Spurious stall event, keep req\n"); + req->status = -COMP_STALL_ERROR; + req->actual = 0; + return; + } + dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n"); + trb_to_noop(req->trb); + } + break; + default: dev_err(dbc->dev, "unknown tx error %d\n", comp_code); status = -comp_code; break; } - /* Match the pending request: */ - list_for_each_entry(r, &dep->list_pending, list_pending) { - if (r->trb_dma == event->trans_event.buffer) { - req = r; - break; - } - } - - if (!req) { - dev_warn(dbc->dev, "no matched request\n"); - return; - } - - trace_xhci_dbc_handle_transfer(ring, &req->trb->generic); - ring->num_trbs_free++; req->actual = req->length - remain_length; xhci_dbc_giveback(req, status); @@ -750,7 +809,6 @@ static void inc_evt_deq(struct xhci_ring *ring) static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) { dma_addr_t deq; - struct dbc_ep *dep; union xhci_trb *evt; u32 ctrl, portsc; bool update_erdp = false; @@ -802,43 +860,17 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) return EVT_DISC; } - /* Handle endpoint stall event: */ + /* Check and handle changes in endpoint halt status */ ctrl = readl(&dbc->regs->control); - if ((ctrl & DBC_CTRL_HALT_IN_TR) || - (ctrl & DBC_CTRL_HALT_OUT_TR)) { - dev_info(dbc->dev, "DbC Endpoint stall\n"); - dbc->state = DS_STALLED; - - if (ctrl & DBC_CTRL_HALT_IN_TR) { - dep = get_in_ep(dbc); - xhci_dbc_flush_endpoint_requests(dep); - } - - if (ctrl & DBC_CTRL_HALT_OUT_TR) { - dep = get_out_ep(dbc); - xhci_dbc_flush_endpoint_requests(dep); - } - - return EVT_DONE; - } + handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR); + handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & 
DBC_CTRL_HALT_OUT_TR); /* Clear DbC run change bit: */ if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) { writel(ctrl, &dbc->regs->control); ctrl = readl(&dbc->regs->control); } - break; - case DS_STALLED: - ctrl = readl(&dbc->regs->control); - if (!(ctrl & DBC_CTRL_HALT_IN_TR) && - !(ctrl & DBC_CTRL_HALT_OUT_TR) && - (ctrl & DBC_CTRL_DBC_RUN)) { - dbc->state = DS_CONFIGURED; - break; - } - - return EVT_DONE; default: dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state); break; @@ -941,9 +973,6 @@ static ssize_t dbc_show(struct device *dev, case DS_CONFIGURED: p = "configured"; break; - case DS_STALLED: - p = "stalled"; - break; default: p = "unknown"; } diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h index caeadfb8760b..8e5283839312 100644 --- a/drivers/usb/host/xhci-dbgcap.h +++ b/drivers/usb/host/xhci-dbgcap.h @@ -81,7 +81,6 @@ enum dbc_state { DS_ENABLED, DS_CONNECTED, DS_CONFIGURED, - DS_STALLED, }; struct dbc_ep { @@ -89,6 +88,7 @@ struct dbc_ep { struct list_head list_pending; struct xhci_ring *ring; unsigned int direction:1; + unsigned int halted:1; }; #define DBC_QUEUE_SIZE 16 diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index f2e6f792bb02..01b2226e3eb6 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -2343,10 +2343,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) page_size = readl(&xhci->op_regs->page_size); xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Supported page size register = 0x%x", page_size); - i = ffs(page_size); - if (i < 16) + val = ffs(page_size) - 1; + if (val < 16) xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "Supported page size of %iK", (1 << (i+12)) / 1024); + "Supported page size of %iK", (1 << (val + 12)) / 1024); else xhci_warn(xhci, "WARN: no supported page size\n"); /* Use 4K pages, since that's common and the minimum the HC supports */ @@ -2389,7 +2389,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) * and our use of dma addresses in the trb_address_map radix tree needs * TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need. 
*/ - if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH) + if (xhci->quirks & XHCI_TRB_OVERFETCH) + /* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */ xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2); else diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c index 87f1597a0e5a..257e4d79971f 100644 --- a/drivers/usb/host/xhci-mvebu.c +++ b/drivers/usb/host/xhci-mvebu.c @@ -73,13 +73,3 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) return 0; } - -int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - - /* Without reset on resume, the HC won't work at all */ - xhci->quirks |= XHCI_RESET_ON_RESUME; - - return 0; -} diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h index 3be021793cc8..9d26e22c4842 100644 --- a/drivers/usb/host/xhci-mvebu.h +++ b/drivers/usb/host/xhci-mvebu.h @@ -12,16 +12,10 @@ struct usb_hcd; #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); -int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); #else static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) { return 0; } - -static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) -{ - return 0; -} #endif #endif /* __LINUX_XHCI_MVEBU_H */ diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 2ff049e02326..ca27bc15209c 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -28,8 +28,8 @@ #define SPARSE_CNTL_ENABLE 0xC12C /* Device for a quirk */ -#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 -#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 +#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 +#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009 0x1009 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1100 0x1100 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400 0x1400 @@ -38,8 +38,10 @@ #define PCI_DEVICE_ID_EJ168 0x7023 #define PCI_DEVICE_ID_EJ188 0x7052 -#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 -#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 +#define PCI_DEVICE_ID_VIA_VL805 0x3483 + +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f @@ -304,8 +306,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x3432) xhci->quirks |= XHCI_BROKEN_STREAMS; - if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) + if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) { xhci->quirks |= XHCI_LPM_SUPPORT; + xhci->quirks |= XHCI_TRB_OVERFETCH; + } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) { @@ -353,11 +357,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->device == 0x9202) { xhci->quirks |= XHCI_RESET_ON_RESUME; - xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + xhci->quirks |= XHCI_TRB_OVERFETCH; } if (pdev->device == 0x9203) - xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH; + xhci->quirks |= XHCI_TRB_OVERFETCH; } if (pdev->vendor == PCI_DEVICE_ID_CADENCE && @@ -763,11 +767,9 @@ static struct pci_driver xhci_pci_driver = { /* suspend and resume implemented later */ .shutdown = usb_hcd_pci_shutdown, -#ifdef CONFIG_PM .driver = { - .pm = &usb_hcd_pci_pm_ops + .pm = 
pm_ptr(&usb_hcd_pci_pm_ops), }, -#endif }; static int __init xhci_pci_init(void) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index f166908aff5c..0c99dc79d885 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -111,7 +111,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = { }; static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { - .init_quirk = xhci_mvebu_a3700_init_quirk, + .quirks = XHCI_RESET_ON_RESUME, }; static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = { diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 7ccd159d4c7c..449078dd21aa 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -52,6 +52,7 @@ * endpoint rings; it generates events on the event ring for these. */ +#include #include #include #include @@ -379,7 +380,8 @@ static void xhci_handle_stopped_cmd_ring(struct xhci_hcd *xhci, if ((xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) && !(xhci->xhc_state & XHCI_STATE_DYING)) { xhci->current_cmd = cur_cmd; - xhci_mod_cmd_timer(xhci); + if (cur_cmd) + xhci_mod_cmd_timer(xhci); xhci_ring_cmd_db(xhci); } } @@ -954,6 +956,13 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) unsigned int slot_id = ep->vdev->slot_id; int err; + /* + * This is not going to work if the hardware is changing its dequeue + * pointers as we look at them. Completion handler will call us later. + */ + if (ep->ep_state & SET_DEQ_PENDING) + return 0; + xhci = ep->xhci; list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) { @@ -1046,6 +1055,19 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep) return 0; } +/* + * Erase queued TDs from transfer ring(s) and give back those the xHC didn't + * stop on. If necessary, queue commands to move the xHC off cancelled TDs it + * stopped on. Those will be given back later when the commands complete. + * + * Call under xhci->lock on a stopped endpoint. + */ +void xhci_process_cancelled_tds(struct xhci_virt_ep *ep) +{ + xhci_invalidate_cancelled_tds(ep); + xhci_giveback_invalidated_tds(ep); +} + /* * Returns the TD the endpoint ring halted on. * Only call for non-running rings without streams. @@ -1136,9 +1158,38 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, break; ep->ep_state &= ~EP_STOP_CMD_PENDING; return; + case EP_STATE_STOPPED: + /* + * Per xHCI 4.6.9, Stop Endpoint command on a Stopped + * EP is a Context State Error, and EP stays Stopped. + * + * But maybe it failed on Halted, and somebody ran Reset + * Endpoint later. EP state is now Stopped and EP_HALTED + * still set because Reset EP handler will run after us. + */ + if (ep->ep_state & EP_HALTED) + break; + /* + * On some HCs EP state remains Stopped for some tens of + * us to a few ms or more after a doorbell ring, and any + * new Stop Endpoint fails without aborting the restart. + * This handler may run quickly enough to still see this + * Stopped state, but it will soon change to Running. + * + * Assume this bug on unexpected Stop Endpoint failures. + * Keep retrying until the EP starts and stops again. 
+ */ + fallthrough; case EP_STATE_RUNNING: /* Race, HW handled stop ep cmd before ep was running */ - xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n"); + xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n", + GET_EP_CTX_STATE(ep_ctx)); + /* + * Don't retry forever if we guessed wrong or a defective HC never starts + * the EP or says 'Running' but fails the command. We must give back TDs. + */ + if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100))) + break; command = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!command) { @@ -1316,7 +1367,6 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, struct xhci_ep_ctx *ep_ctx; struct xhci_slot_ctx *slot_ctx; struct xhci_td *td, *tmp_td; - bool deferred = false; ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); @@ -1417,8 +1467,6 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n", __func__, td->urb); xhci_td_cleanup(ep->xhci, td, ep_ring, td->status); - } else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) { - deferred = true; } else { xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n", __func__, td->urb, td->cancel_status); @@ -1429,11 +1477,15 @@ cleanup: ep->queued_deq_seg = NULL; ep->queued_deq_ptr = NULL; - if (deferred) { - /* We have more streams to clear */ + /* Check for deferred or newly cancelled TDs */ + if (!list_empty(&ep->cancelled_td_list)) { xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n", __func__); xhci_invalidate_cancelled_tds(ep); + /* Try to restart the endpoint if all is done */ + ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + /* Start giving back any TDs invalidated above */ + xhci_giveback_invalidated_tds(ep); } else { /* Restart any rings with pending URBs */ xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__); diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 51eabc5e8770..14a772feab79 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1228,6 +1228,7 @@ static void tegra_xhci_id_work(struct work_struct *work) tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl, tegra->otg_usb2_port); + pm_runtime_get_sync(tegra->dev); if (tegra->host_mode) { /* switch to host mode */ if (tegra->otg_usb3_port >= 0) { @@ -1257,6 +1258,7 @@ static void tegra_xhci_id_work(struct work_struct *work) } tegra_xhci_set_port_power(tegra, true, true); + pm_runtime_mark_last_busy(tegra->dev); } else { if (tegra->otg_usb3_port >= 0) @@ -1264,6 +1266,7 @@ static void tegra_xhci_id_work(struct work_struct *work) tegra_xhci_set_port_power(tegra, true, false); } + pm_runtime_put_autosuspend(tegra->dev); } #if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 086d57af63fb..4e1c3ab58620 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -8,6 +8,7 @@ * Some code borrowed from the Linux EHCI driver. */ +#include #include #include #include @@ -1902,15 +1903,27 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) } } - /* Queue a stop endpoint command, but only if this is - * the first cancellation to be handled. 
- */ - if (!(ep->ep_state & EP_STOP_CMD_PENDING)) { + /* These completion handlers will sort out cancelled TDs for us */ + if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) { + xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n", + urb->dev->slot_id, ep_index, ep->ep_state); + goto done; + } + + /* In this case no commands are pending but the endpoint is stopped */ + if (ep->ep_state & EP_CLEARING_TT) { + /* and cancelled TDs can be given back right away */ + xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n", + urb->dev->slot_id, ep_index, ep->ep_state); + xhci_process_cancelled_tds(ep); + } else { + /* Otherwise, queue a new Stop Endpoint command */ command = xhci_alloc_command(xhci, false, GFP_ATOMIC); if (!command) { ret = -ENOMEM; goto done; } + ep->stop_time = jiffies; ep->ep_state |= EP_STOP_CMD_PENDING; xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id, ep_index, 0); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index aa747365b044..ab01cc82dbb6 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -717,6 +717,7 @@ struct xhci_virt_ep { /* Bandwidth checking storage */ struct xhci_bw_info bw_info; struct list_head bw_endpoint_list; + unsigned long stop_time; /* Isoch Frame ID checking storage */ int next_frame_id; /* Use new Isoch TRB layout needed for extended TBC support */ @@ -1658,7 +1659,7 @@ struct xhci_hcd { #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42) #define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43) #define XHCI_RESET_TO_DEFAULT BIT_ULL(44) -#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45) +#define XHCI_TRB_OVERFETCH BIT_ULL(45) #define XHCI_ZHAOXIN_HOST BIT_ULL(46) #define XHCI_WRITE_64_HI_LO BIT_ULL(47) #define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48) @@ -1952,6 +1953,7 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci, void xhci_cleanup_command_queue(struct xhci_hcd *xhci); void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring); unsigned int count_trbs(u64 addr, u64 len); +void xhci_process_cancelled_tds(struct xhci_virt_ep *ep); /* xHCI roothub code */ void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port, diff --git a/drivers/usb/misc/chaoskey.c b/drivers/usb/misc/chaoskey.c index 87067c3d6109..d99d424c05a7 100644 --- a/drivers/usb/misc/chaoskey.c +++ b/drivers/usb/misc/chaoskey.c @@ -27,6 +27,8 @@ static struct usb_class_driver chaoskey_class; static int chaoskey_rng_read(struct hwrng *rng, void *data, size_t max, bool wait); +static DEFINE_MUTEX(chaoskey_list_lock); + #define usb_dbg(usb_if, format, arg...) 
\ dev_dbg(&(usb_if)->dev, format, ## arg) @@ -234,6 +236,7 @@ static void chaoskey_disconnect(struct usb_interface *interface) usb_deregister_dev(interface, &chaoskey_class); usb_set_intfdata(interface, NULL); + mutex_lock(&chaoskey_list_lock); mutex_lock(&dev->lock); dev->present = false; @@ -245,6 +248,7 @@ static void chaoskey_disconnect(struct usb_interface *interface) } else mutex_unlock(&dev->lock); + mutex_unlock(&chaoskey_list_lock); usb_dbg(interface, "disconnect done"); } @@ -252,6 +256,7 @@ static int chaoskey_open(struct inode *inode, struct file *file) { struct chaoskey *dev; struct usb_interface *interface; + int rv = 0; /* get the interface from minor number and driver information */ interface = usb_find_interface(&chaoskey_driver, iminor(inode)); @@ -267,18 +272,23 @@ static int chaoskey_open(struct inode *inode, struct file *file) } file->private_data = dev; + mutex_lock(&chaoskey_list_lock); mutex_lock(&dev->lock); - ++dev->open; + if (dev->present) + ++dev->open; + else + rv = -ENODEV; mutex_unlock(&dev->lock); + mutex_unlock(&chaoskey_list_lock); - usb_dbg(interface, "open success"); - return 0; + return rv; } static int chaoskey_release(struct inode *inode, struct file *file) { struct chaoskey *dev = file->private_data; struct usb_interface *interface; + int rv = 0; if (dev == NULL) return -ENODEV; @@ -287,14 +297,15 @@ static int chaoskey_release(struct inode *inode, struct file *file) usb_dbg(interface, "release"); + mutex_lock(&chaoskey_list_lock); mutex_lock(&dev->lock); usb_dbg(interface, "open count at release is %d", dev->open); if (dev->open <= 0) { usb_dbg(interface, "invalid open count (%d)", dev->open); - mutex_unlock(&dev->lock); - return -ENODEV; + rv = -ENODEV; + goto bail; } --dev->open; @@ -303,13 +314,15 @@ static int chaoskey_release(struct inode *inode, struct file *file) if (dev->open == 0) { mutex_unlock(&dev->lock); chaoskey_free(dev); - } else - mutex_unlock(&dev->lock); - } else - mutex_unlock(&dev->lock); - + goto destruction; + } + } +bail: + mutex_unlock(&dev->lock); +destruction: + mutex_unlock(&chaoskey_list_lock); usb_dbg(interface, "release success"); - return 0; + return rv; } static void chaos_read_callback(struct urb *urb) diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index b421f1326087..5606c5a2624a 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -277,28 +277,45 @@ static ssize_t iowarrior_read(struct file *file, char __user *buffer, struct iowarrior *dev; int read_idx; int offset; + int retval; dev = file->private_data; + if (file->f_flags & O_NONBLOCK) { + retval = mutex_trylock(&dev->mutex); + if (!retval) + return -EAGAIN; + } else { + retval = mutex_lock_interruptible(&dev->mutex); + if (retval) + return -ERESTARTSYS; + } + /* verify that the device wasn't unplugged */ - if (!dev || !dev->present) - return -ENODEV; + if (!dev->present) { + retval = -ENODEV; + goto exit; + } dev_dbg(&dev->interface->dev, "minor %d, count = %zd\n", dev->minor, count); /* read count must be packet size (+ time stamp) */ if ((count != dev->report_size) - && (count != (dev->report_size + 1))) - return -EINVAL; + && (count != (dev->report_size + 1))) { + retval = -EINVAL; + goto exit; + } /* repeat until no buffer overrun in callback handler occur */ do { atomic_set(&dev->overflow_flag, 0); if ((read_idx = read_index(dev)) == -1) { /* queue empty */ - if (file->f_flags & O_NONBLOCK) - return -EAGAIN; + if (file->f_flags & O_NONBLOCK) { + retval = -EAGAIN; + goto exit; + } else { //next line 
will return when there is either new data, or the device is unplugged int r = wait_event_interruptible(dev->read_wait, @@ -309,28 +326,37 @@ static ssize_t iowarrior_read(struct file *file, char __user *buffer, -1)); if (r) { //we were interrupted by a signal - return -ERESTART; + retval = -ERESTART; + goto exit; } if (!dev->present) { //The device was unplugged - return -ENODEV; + retval = -ENODEV; + goto exit; } if (read_idx == -1) { // Can this happen ??? - return 0; + retval = 0; + goto exit; } } } offset = read_idx * (dev->report_size + 1); if (copy_to_user(buffer, dev->read_queue + offset, count)) { - return -EFAULT; + retval = -EFAULT; + goto exit; } } while (atomic_read(&dev->overflow_flag)); read_idx = ++read_idx == MAX_INTERRUPT_BUFFER ? 0 : read_idx; atomic_set(&dev->read_idx, read_idx); + mutex_unlock(&dev->mutex); return count; + +exit: + mutex_unlock(&dev->mutex); + return retval; } /* @@ -886,7 +912,6 @@ error: static void iowarrior_disconnect(struct usb_interface *interface) { struct iowarrior *dev = usb_get_intfdata(interface); - int minor = dev->minor; usb_deregister_dev(interface, &iowarrior_class); @@ -910,9 +935,6 @@ static void iowarrior_disconnect(struct usb_interface *interface) mutex_unlock(&dev->mutex); iowarrior_delete(dev); } - - dev_info(&interface->dev, "I/O-Warror #%d now disconnected\n", - minor - IOWARRIOR_MINOR_BASE); } /* usb specific object needed to register this driver with the usb subsystem */ diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index c313cd41f7a5..0eed614ac127 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -441,7 +441,10 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer, if (count == 0) goto error; - mutex_lock(&dev->io_mutex); + retval = mutex_lock_interruptible(&dev->io_mutex); + if (retval < 0) + return -EINTR; + if (dev->disconnected) { /* already disconnected */ mutex_unlock(&dev->io_mutex); retval = -ENODEV; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index ba20272d2221..eb3464f977a7 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -1170,12 +1170,19 @@ struct free_record { */ void musb_ep_restart(struct musb *musb, struct musb_request *req) { + u16 csr; + void __iomem *epio = req->ep->hw_ep->regs; + trace_musb_req_start(req); musb_ep_select(musb->mregs, req->epnum); - if (req->tx) + if (req->tx) { txstate(musb, req); - else - rxstate(musb, req); + } else { + csr = musb_readw(epio, MUSB_RXCSR); + csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; + musb_writew(epio, MUSB_RXCSR, csr); + musb_writew(epio, MUSB_RXCSR, csr); + } } static int musb_ep_restart_resume_work(struct musb *musb, void *data) diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 96f3939a65e2..9af61f17dfc7 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -312,8 +312,10 @@ static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv) priv->clks[1] = of_clk_get(dev_of_node(dev), 1); if (PTR_ERR(priv->clks[1]) == -ENOENT) priv->clks[1] = NULL; - else if (IS_ERR(priv->clks[1])) + else if (IS_ERR(priv->clks[1])) { + clk_put(priv->clks[0]); return PTR_ERR(priv->clks[1]); + } return 0; } @@ -768,6 +770,8 @@ static int usbhs_remove(struct platform_device *pdev) dev_dbg(&pdev->dev, "usb remove\n"); + flush_delayed_work(&priv->notify_hotplug_work); + /* power off */ if (!usbhs_get_dparam(priv, runtime_pwctrl)) usbhsc_power_ctrl(priv, 0); diff --git 
a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 105132ae87ac..e8e5723f5412 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -1094,7 +1094,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) goto usbhs_mod_gadget_probe_err_gpriv; } - gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); + gpriv->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_UNDEFINED); dev_info(dev, "%stransceiver found\n", !IS_ERR(gpriv->transceiver) ? "" : "no "); diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c index a327f8bc5704..917dffe9aee5 100644 --- a/drivers/usb/roles/class.c +++ b/drivers/usb/roles/class.c @@ -354,14 +354,15 @@ usb_role_switch_register(struct device *parent, dev_set_name(&sw->dev, "%s-role-switch", desc->name ? desc->name : dev_name(parent)); + sw->registered = true; + ret = device_register(&sw->dev); if (ret) { + sw->registered = false; put_device(&sw->dev); return ERR_PTR(ret); } - sw->registered = true; - /* TODO: Symlinks for the host port and the device controller. */ return sw; diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index aa30288c8a8e..c869cddb5471 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -223,6 +223,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ + { USB_DEVICE(0x1B93, 0x1013) }, /* Phoenix Contact UPS Device */ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 248cbc9c48fd..56e1c574f667 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1079,6 +1079,22 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, /* GMC devices */ { USB_DEVICE(GMC_VID, GMC_Z216C_PID) }, + /* Altera USB Blaster 3 */ + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6022_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6025_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6026_PID, 3) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_6029_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602A_PID, 3) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602C_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602D_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 1) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 2) }, + { USB_DEVICE_INTERFACE_NUMBER(ALTERA_VID, ALTERA_UB3_602E_PID, 3) }, + /* Abacus Electrics */ + { USB_DEVICE(FTDI_VID, ABACUS_OPTICAL_PROBE_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 5ee60ba2a73c..9acb6f837327 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -442,6 +442,11 @@ #define LINX_FUTURE_1_PID 0xF44B /* Linx future device */ 
#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */ +/* + * Abacus Electrics + */ +#define ABACUS_OPTICAL_PROBE_PID 0xf458 /* ABACUS ELECTRICS Optical Probe */ + /* * Oceanic product ids */ @@ -1612,3 +1617,16 @@ */ #define GMC_VID 0x1cd7 #define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */ + +/* + * Altera USB Blaster 3 (http://www.altera.com). + */ +#define ALTERA_VID 0x09fb +#define ALTERA_UB3_6022_PID 0x6022 +#define ALTERA_UB3_6025_PID 0x6025 +#define ALTERA_UB3_6026_PID 0x6026 +#define ALTERA_UB3_6029_PID 0x6029 +#define ALTERA_UB3_602A_PID 0x602a +#define ALTERA_UB3_602C_PID 0x602c +#define ALTERA_UB3_602D_PID 0x602d +#define ALTERA_UB3_602E_PID 0x602e diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index ec737fcd2c40..b09d95da2fe5 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -611,6 +611,7 @@ static void option_instat_callback(struct urb *urb); /* Sierra Wireless products */ #define SIERRA_VENDOR_ID 0x1199 #define SIERRA_PRODUCT_EM9191 0x90d3 +#define SIERRA_PRODUCT_EM9291 0x90e3 /* UNISOC (Spreadtrum) products */ #define UNISOC_VENDOR_ID 0x1782 @@ -619,13 +620,6 @@ static void option_instat_callback(struct urb *urb); /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */ #define LUAT_PRODUCT_AIR720U 0x4e00 -/* MeiG Smart Technology products */ -#define MEIGSMART_VENDOR_ID 0x2dee -/* MeiG Smart SRM825L based on Qualcomm 315 */ -#define MEIGSMART_PRODUCT_SRM825L 0x4d22 -/* MeiG Smart SLM320 based on UNISOC UIS8910 */ -#define MEIGSMART_PRODUCT_SLM320 0x4d41 - /* Device flags */ /* Highest interface number which can be used with NCTRL() and RSVD() */ @@ -1365,23 +1359,23 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */ .driver_info = NCTRL(0) | RSVD(1) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990A (rmnet) */ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990A (MBIM) */ .driver_info = NCTRL(0) | RSVD(1) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990A (RNDIS) */ .driver_info = NCTRL(2) | RSVD(3) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990A (ECM) */ .driver_info = NCTRL(0) | RSVD(1) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */ .driver_info = RSVD(0) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990A (rmnet) */ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990 (MBIM) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1081, 0xff), /* Telit FE990A (MBIM) */ .driver_info = NCTRL(0) | RSVD(1) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1082, 0xff), /* Telit FE990 (RNDIS) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 
0x1082, 0xff), /* Telit FE990A (RNDIS) */ .driver_info = NCTRL(2) | RSVD(3) }, - { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990 (ECM) */ + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff), /* Telit FE990A (ECM) */ .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff), /* Telit FN20C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(3) }, @@ -1395,6 +1389,44 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */ .driver_info = NCTRL(3) | RSVD(4) | RSVD(5) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x30), /* Telit FE990B (rmnet) */ + .driver_info = NCTRL(5) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b0, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x30), /* Telit FE990B (MBIM) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b1, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x30), /* Telit FE990B (RNDIS) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b2, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x30), /* Telit FE990B (ECM) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10b3, 0xff, 0xff, 0x60) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c0, 0xff), /* Telit FE910C04 (rmnet) */ + .driver_info = RSVD(0) | NCTRL(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c4, 0xff), /* Telit FE910C04 (rmnet) */ + .driver_info = RSVD(0) | NCTRL(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff), /* Telit FE910C04 (rmnet) */ + .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x30), /* Telit FN990B (rmnet) */ + .driver_info = NCTRL(5) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30), /* Telit FN990B (MBIM) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x30), /* Telit FN990B (RNDIS) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d2, 0xff, 0xff, 0x60) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x30), /* Telit FN990B (ECM) */ + .driver_info = NCTRL(6) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d3, 0xff, 0xff, 0x60) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { 
USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), @@ -2247,6 +2279,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(2) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7127, 0xff, 0x00, 0x00), .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7129, 0xff, 0x00, 0x00), /* MediaTek T7XX */ + .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200), .driver_info = RSVD(1) | RSVD(4) }, @@ -2337,6 +2371,14 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d41, 0xff, 0, 0) }, /* MeiG Smart SLM320 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d57, 0xff, 0, 0) }, /* MeiG Smart SLM770A */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0, 0) }, /* MeiG Smart SRM815 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x02) }, /* MeiG Smart SLM828 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x03) }, /* MeiG Smart SLM828 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x30) }, /* MeiG Smart SRM815 and SRM825L */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x40) }, /* MeiG Smart SRM825L */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x60) }, /* MeiG Smart SRM825L */ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ @@ -2375,16 +2417,31 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Golbal EDU */ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0x00, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010a, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for WWAN Ready */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010a, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010a, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010b, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for WWAN Ready */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010b, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010b, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010c, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for WWAN Ready */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010c, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010c, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010d, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for WWAN Ready */ + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010d, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x010d, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) }, { 
USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, - { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) }, - { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) }, - { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) }, - { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) }, + { USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */ + .driver_info = NCTRL(1) }, + { USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff), /* TCL IK512 ECM */ + .driver_info = NCTRL(3) }, + { USB_DEVICE_INTERFACE_CLASS(0x2949, 0x8700, 0xff) }, /* Neoway N723-EA */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c index 6fca40ace83a..995313f4687a 100644 --- a/drivers/usb/serial/quatech2.c +++ b/drivers/usb/serial/quatech2.c @@ -511,7 +511,7 @@ static void qt2_process_read_urb(struct urb *urb) newport = *(ch + 3); - if (newport > serial->num_ports) { + if (newport >= serial->num_ports) { dev_err(&port->dev, "%s - port change to invalid port: %i\n", __func__, newport); diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 24b8772a345e..bac5ab6377ae 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -101,6 +101,11 @@ DEVICE(nokia, NOKIA_IDS); { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ DEVICE_N(novatel_gps, NOVATEL_IDS, 3); +/* OWON electronic test and measurement equipment driver */ +#define OWON_IDS() \ + { USB_DEVICE(0x5345, 0x1234) } /* HDS200 oscilloscopes and others */ +DEVICE(owon, OWON_IDS); + /* Siemens USB/MPI adapter */ #define SIEMENS_IDS() \ { USB_DEVICE(0x908, 0x0004) } @@ -135,6 +140,7 @@ static struct usb_serial_driver * const serial_drivers[] = { &motorola_tetra_device, &nokia_device, &novatel_gps_device, + &owon_device, &siemens_mpi_device, &suunto_device, &vivopay_device, @@ -154,6 +160,7 @@ static const struct usb_device_id id_table[] = { MOTOROLA_TETRA_IDS(), NOKIA_IDS(), NOVATEL_IDS(), + OWON_IDS(), SIEMENS_IDS(), SUUNTO_IDS(), VIVOPAY_IDS(), diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 46e2e1c165c4..1bfaf5050569 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -255,6 +255,13 @@ UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64 ), +/* Added by Lubomir Rintel , a very fine chap */ +UNUSUAL_DEV( 0x0421, 0x06c2, 0x0000, 0x0406, + "Nokia", + "Nokia 208", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64 ), + #ifdef NO_SDDR09 UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100, "Microtech", diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 4d4fad2471a9..108d42a2eeae 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -89,6 +89,13 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999, USB_SC_DEVICE, 
USB_PR_DEVICE, NULL, US_FL_NO_REPORT_LUNS), +/* Reported-by: Oliver Neukum */ +UNUSUAL_DEV(0x125f, 0xa94a, 0x0160, 0x0160, + "ADATA", + "Portable HDD CH94", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* Reported-by: Benjamin Tissoires */ UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999, "Initio Corporation", diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index cbf34a889a59..99b2d1dcbf24 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -550,15 +550,20 @@ static ssize_t pin_assignment_show(struct device *dev, } static DEVICE_ATTR_RW(pin_assignment); -static struct attribute *dp_altmode_attrs[] = { +static struct attribute *displayport_attrs[] = { &dev_attr_configuration.attr, &dev_attr_pin_assignment.attr, NULL }; -static const struct attribute_group dp_altmode_group = { +static const struct attribute_group displayport_group = { .name = "displayport", - .attrs = dp_altmode_attrs, + .attrs = displayport_attrs, +}; + +static const struct attribute_group *displayport_groups[] = { + &displayport_group, + NULL, }; int dp_altmode_probe(struct typec_altmode *alt) @@ -566,7 +571,6 @@ int dp_altmode_probe(struct typec_altmode *alt) const struct typec_altmode *port = typec_altmode_get_partner(alt); struct fwnode_handle *fwnode; struct dp_altmode *dp; - int ret; /* FIXME: Port can only be DFP_U. */ @@ -577,10 +581,6 @@ int dp_altmode_probe(struct typec_altmode *alt) DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo))) return -ENODEV; - ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group); - if (ret) - return ret; - dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL); if (!dp) return -ENOMEM; @@ -611,7 +611,6 @@ void dp_altmode_remove(struct typec_altmode *alt) { struct dp_altmode *dp = typec_altmode_get_drvdata(alt); - sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group); cancel_work_sync(&dp->work); if (dp->connector_fwnode) { @@ -636,6 +635,7 @@ static struct typec_altmode_driver dp_altmode_driver = { .driver = { .name = "typec_displayport", .owner = THIS_MODULE, + .dev_groups = displayport_groups, }, }; module_typec_altmode_driver(dp_altmode_driver); diff --git a/drivers/usb/typec/anx7411.c b/drivers/usb/typec/anx7411.c index b8f3b75fd7eb..dfe4affa1757 100644 --- a/drivers/usb/typec/anx7411.c +++ b/drivers/usb/typec/anx7411.c @@ -289,6 +289,8 @@ struct anx7411_data { struct power_supply *psy; struct power_supply_desc psy_desc; struct device *dev; + struct fwnode_handle *switch_node; + struct fwnode_handle *mux_node; }; static u8 snk_identity[] = { @@ -1020,6 +1022,16 @@ static void anx7411_port_unregister_altmodes(struct typec_altmode **adev) } } +static void anx7411_port_unregister(struct typec_params *typecp) +{ + fwnode_handle_put(typecp->caps.fwnode); + anx7411_port_unregister_altmodes(typecp->port_amode); + if (typecp->port) + typec_unregister_port(typecp->port); + if (typecp->role_sw) + usb_role_switch_put(typecp->role_sw); +} + static int anx7411_usb_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state) { @@ -1088,6 +1100,7 @@ static void anx7411_unregister_mux(struct anx7411_data *ctx) if (ctx->typec.typec_mux) { typec_mux_unregister(ctx->typec.typec_mux); ctx->typec.typec_mux = NULL; + fwnode_handle_put(ctx->mux_node); } } @@ -1096,6 +1109,7 @@ static void anx7411_unregister_switch(struct anx7411_data *ctx) if (ctx->typec.typec_switch) { typec_switch_unregister(ctx->typec.typec_switch); ctx->typec.typec_switch = NULL; + fwnode_handle_put(ctx->switch_node); } } @@ -1103,28 
+1117,29 @@ static int anx7411_typec_switch_probe(struct anx7411_data *ctx, struct device *dev) { int ret; - struct device_node *node; - node = of_get_child_by_name(dev->of_node, "orientation_switch"); - if (!node) + ctx->switch_node = device_get_named_child_node(dev, "orientation_switch"); + if (!ctx->switch_node) return 0; - ret = anx7411_register_switch(ctx, dev, &node->fwnode); + ret = anx7411_register_switch(ctx, dev, ctx->switch_node); if (ret) { dev_err(dev, "failed register switch"); + fwnode_handle_put(ctx->switch_node); return ret; } - node = of_get_child_by_name(dev->of_node, "mode_switch"); - if (!node) { + ctx->mux_node = device_get_named_child_node(dev, "mode_switch"); + if (!ctx->mux_node) { dev_err(dev, "no typec mux exist"); ret = -ENODEV; goto unregister_switch; } - ret = anx7411_register_mux(ctx, dev, &node->fwnode); + ret = anx7411_register_mux(ctx, dev, ctx->mux_node); if (ret) { dev_err(dev, "failed register mode switch"); + fwnode_handle_put(ctx->mux_node); ret = -ENODEV; goto unregister_switch; } @@ -1153,34 +1168,34 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx, ret = fwnode_property_read_string(fwnode, "power-role", &buf); if (ret) { dev_err(dev, "power-role not found: %d\n", ret); - return ret; + goto put_fwnode; } ret = typec_find_port_power_role(buf); if (ret < 0) - return ret; + goto put_fwnode; cap->type = ret; ret = fwnode_property_read_string(fwnode, "data-role", &buf); if (ret) { dev_err(dev, "data-role not found: %d\n", ret); - return ret; + goto put_fwnode; } ret = typec_find_port_data_role(buf); if (ret < 0) - return ret; + goto put_fwnode; cap->data = ret; ret = fwnode_property_read_string(fwnode, "try-power-role", &buf); if (ret) { dev_err(dev, "try-power-role not found: %d\n", ret); - return ret; + goto put_fwnode; } ret = typec_find_power_role(buf); if (ret < 0) - return ret; + goto put_fwnode; cap->prefer_role = ret; /* Get source pdos */ @@ -1192,7 +1207,7 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx, typecp->src_pdo_nr); if (ret < 0) { dev_err(dev, "source cap validate failed: %d\n", ret); - return -EINVAL; + goto put_fwnode; } typecp->caps_flags |= HAS_SOURCE_CAP; @@ -1206,7 +1221,7 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx, typecp->sink_pdo_nr); if (ret < 0) { dev_err(dev, "sink cap validate failed: %d\n", ret); - return -EINVAL; + goto put_fwnode; } for (i = 0; i < typecp->sink_pdo_nr; i++) { @@ -1250,13 +1265,21 @@ static int anx7411_typec_port_probe(struct anx7411_data *ctx, ret = PTR_ERR(ctx->typec.port); ctx->typec.port = NULL; dev_err(dev, "Failed to register type c port %d\n", ret); - return ret; + goto put_usb_role_switch; } typec_port_register_altmodes(ctx->typec.port, NULL, ctx, ctx->typec.port_amode, MAX_ALTMODE); return 0; + +put_usb_role_switch: + if (ctx->typec.role_sw) + usb_role_switch_put(ctx->typec.role_sw); +put_fwnode: + fwnode_handle_put(fwnode); + + return ret; } static int anx7411_typec_check_connection(struct anx7411_data *ctx) @@ -1528,8 +1551,7 @@ free_wq: destroy_workqueue(plat->workqueue); free_typec_port: - typec_unregister_port(plat->typec.port); - anx7411_port_unregister_altmodes(plat->typec.port_amode); + anx7411_port_unregister(&plat->typec); free_typec_switch: anx7411_unregister_switch(plat); @@ -1554,17 +1576,11 @@ static void anx7411_i2c_remove(struct i2c_client *client) if (plat->spi_client) i2c_unregister_device(plat->spi_client); - if (plat->typec.role_sw) - usb_role_switch_put(plat->typec.role_sw); - anx7411_unregister_mux(plat); 
anx7411_unregister_switch(plat); - if (plat->typec.port) - typec_unregister_port(plat->typec.port); - - anx7411_port_unregister_altmodes(plat->typec.port_amode); + anx7411_port_unregister(&plat->typec); } static const struct i2c_device_id anx7411_id[] = { diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index f649769912e5..86efa3b2208b 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -26,6 +26,7 @@ #define VPPS_NEW_MIN_PERCENT 95 #define VPPS_VALID_MIN_MV 100 #define VSINKDISCONNECT_PD_MIN_PERCENT 90 +#define VPPS_SHUTDOWN_MIN_PERCENT 85 struct tcpci { struct device *dev; @@ -336,7 +337,8 @@ static int tcpci_enable_auto_vbus_discharge(struct tcpc_dev *dev, bool enable) } static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum typec_pwr_opmode mode, - bool pps_active, u32 requested_vbus_voltage_mv) + bool pps_active, u32 requested_vbus_voltage_mv, + u32 apdo_min_voltage_mv) { struct tcpci *tcpci = tcpc_to_tcpci(dev); unsigned int pwr_ctrl, threshold = 0; @@ -358,9 +360,12 @@ static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum ty threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV; } else if (mode == TYPEC_PWR_MODE_PD) { if (pps_active) - threshold = ((VPPS_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) - - VSINKPD_MIN_IR_DROP_MV - VPPS_VALID_MIN_MV) * - VSINKDISCONNECT_PD_MIN_PERCENT / 100; + /* + * To prevent disconnect when the source is in Current Limit Mode. + * Set the threshold to the lowest possible voltage vPpsShutdown (min) + */ + threshold = VPPS_SHUTDOWN_MIN_PERCENT * apdo_min_voltage_mv / 100 - + VSINKPD_MIN_IR_DROP_MV; else threshold = ((VSRC_NEW_MIN_PERCENT * requested_vbus_voltage_mv / 100) - VSINKPD_MIN_IR_DROP_MV - VSRC_VALID_MIN_MV) * diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c index 7b217c712c11..d608fefd4f30 100644 --- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c +++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c @@ -332,6 +332,11 @@ static int rt1711h_probe(struct i2c_client *client, { int ret; struct rt1711h_chip *chip; + const u16 alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | + TCPC_ALERT_TX_FAILED | TCPC_ALERT_RX_HARD_RST | + TCPC_ALERT_RX_STATUS | TCPC_ALERT_POWER_STATUS | + TCPC_ALERT_CC_STATUS | TCPC_ALERT_RX_BUF_OVF | + TCPC_ALERT_FAULT; chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); if (!chip) @@ -380,6 +385,12 @@ static int rt1711h_probe(struct i2c_client *client, dev_name(chip->dev), chip); if (ret < 0) return ret; + + /* Enable alert interrupts */ + ret = rt1711h_write16(chip, TCPC_ALERT_MASK, alert_mask); + if (ret < 0) + return ret; + enable_irq_wake(client->irq); return 0; diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 3bde95b68e34..4c066161a3d2 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -2370,10 +2370,12 @@ static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port, return 0; ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active, - requested_vbus_voltage); + requested_vbus_voltage, + port->pps_data.min_volt); tcpm_log_force(port, - "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d", - mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret); + "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u pps_apdo_min_volt:%u ret:%d", + mode, pps_active ? 
'y' : 'n', requested_vbus_voltage, + port->pps_data.min_volt, ret); return ret; } @@ -4164,7 +4166,7 @@ static void run_state_machine(struct tcpm_port *port) port->caps_count = 0; port->pd_capable = true; tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT, - PD_T_SEND_SOURCE_CAP); + PD_T_SENDER_RESPONSE); } break; case SRC_SEND_CAPABILITIES_TIMEOUT: @@ -5189,7 +5191,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, case SNK_TRY_WAIT_DEBOUNCE: if (!tcpm_port_is_sink(port)) { port->max_wait = 0; - tcpm_set_state(port, SRC_TRYWAIT, 0); + tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE); } break; case SRC_TRY_WAIT: diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c index 20917d85d6f4..5d34466a0abf 100644 --- a/drivers/usb/typec/tcpm/wcove.c +++ b/drivers/usb/typec/tcpm/wcove.c @@ -621,10 +621,6 @@ static int wcove_typec_probe(struct platform_device *pdev) if (irq < 0) return irq; - irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq); - if (irq < 0) - return irq; - ret = guid_parse(WCOVE_DSM_UUID, &wcove->guid); if (ret) return ret; diff --git a/drivers/usb/typec/ucsi/displayport.c b/drivers/usb/typec/ucsi/displayport.c index 2431febc4615..e3b5fa3b5f95 100644 --- a/drivers/usb/typec/ucsi/displayport.c +++ b/drivers/usb/typec/ucsi/displayport.c @@ -54,7 +54,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo) u8 cur = 0; int ret; - mutex_lock(&dp->con->lock); + if (!ucsi_con_mutex_lock(dp->con)) + return -ENOTCONN; if (!dp->override && dp->initialized) { const struct typec_altmode *p = typec_altmode_get_partner(alt); @@ -100,7 +101,7 @@ static int ucsi_displayport_enter(struct typec_altmode *alt, u32 *vdo) schedule_work(&dp->work); ret = 0; err_unlock: - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return ret; } @@ -112,7 +113,8 @@ static int ucsi_displayport_exit(struct typec_altmode *alt) u64 command; int ret = 0; - mutex_lock(&dp->con->lock); + if (!ucsi_con_mutex_lock(dp->con)) + return -ENOTCONN; if (!dp->override) { const struct typec_altmode *p = typec_altmode_get_partner(alt); @@ -144,7 +146,7 @@ static int ucsi_displayport_exit(struct typec_altmode *alt) schedule_work(&dp->work); out_unlock: - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return ret; } @@ -202,20 +204,21 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt, int cmd = PD_VDO_CMD(header); int svdm_version; - mutex_lock(&dp->con->lock); + if (!ucsi_con_mutex_lock(dp->con)) + return -ENOTCONN; if (!dp->override && dp->initialized) { const struct typec_altmode *p = typec_altmode_get_partner(alt); dev_warn(&p->dev, "firmware doesn't support alternate mode overriding\n"); - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return -EOPNOTSUPP; } svdm_version = typec_altmode_get_svdm_version(alt); if (svdm_version < 0) { - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return svdm_version; } @@ -259,7 +262,7 @@ static int ucsi_displayport_vdm(struct typec_altmode *alt, break; } - mutex_unlock(&dp->con->lock); + ucsi_con_mutex_unlock(dp->con); return 0; } @@ -296,6 +299,8 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt) if (!dp) return; + cancel_work_sync(&dp->work); + dp->data.conf = 0; dp->data.status = 0; dp->initialized = false; diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 1e4059521eb0..2a03bb992806 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -25,7 +25,7 @@ * 
difficult to estimate the time it takes for the system to process the command * before it is actually passed to the PPM. */ -#define UCSI_TIMEOUT_MS 5000 +#define UCSI_TIMEOUT_MS 10000 /* * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests @@ -1313,11 +1313,11 @@ static int ucsi_init(struct ucsi *ucsi) err_unregister: for (con = connector; con->port; con++) { + if (con->wq) + destroy_workqueue(con->wq); ucsi_unregister_partner(con); ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON); ucsi_unregister_port_psy(con); - if (con->wq) - destroy_workqueue(con->wq); typec_unregister_port(con->port); con->port = NULL; } @@ -1398,6 +1398,40 @@ void ucsi_set_drvdata(struct ucsi *ucsi, void *data) } EXPORT_SYMBOL_GPL(ucsi_set_drvdata); +/** + * ucsi_con_mutex_lock - Acquire the connector mutex + * @con: The connector interface to lock + * + * Returns true on success, false if the connector is disconnected + */ +bool ucsi_con_mutex_lock(struct ucsi_connector *con) +{ + bool mutex_locked = false; + bool connected = true; + + while (connected && !mutex_locked) { + mutex_locked = mutex_trylock(&con->lock) != 0; + connected = con->status.flags & UCSI_CONSTAT_CONNECTED; + if (connected && !mutex_locked) + msleep(20); + } + + connected = connected && con->partner; + if (!connected && mutex_locked) + mutex_unlock(&con->lock); + + return connected; +} + +/** + * ucsi_con_mutex_unlock - Release the connector mutex + * @con: The connector interface to unlock + */ +void ucsi_con_mutex_unlock(struct ucsi_connector *con) +{ + mutex_unlock(&con->lock); +} + /** * ucsi_create - Allocate UCSI instance * @dev: Device interface to the PPM (Platform Policy Manager) @@ -1479,10 +1513,6 @@ void ucsi_unregister(struct ucsi *ucsi) for (i = 0; i < ucsi->cap.num_connectors; i++) { cancel_work_sync(&ucsi->connector[i].work); - ucsi_unregister_partner(&ucsi->connector[i]); - ucsi_unregister_altmodes(&ucsi->connector[i], - UCSI_RECIPIENT_CON); - ucsi_unregister_port_psy(&ucsi->connector[i]); if (ucsi->connector[i].wq) { struct ucsi_work *uwork; @@ -1497,6 +1527,11 @@ void ucsi_unregister(struct ucsi *ucsi) mutex_unlock(&ucsi->connector[i].lock); destroy_workqueue(ucsi->connector[i].wq); } + + ucsi_unregister_partner(&ucsi->connector[i]); + ucsi_unregister_altmodes(&ucsi->connector[i], + UCSI_RECIPIENT_CON); + ucsi_unregister_port_psy(&ucsi->connector[i]); typec_unregister_port(ucsi->connector[i].port); } diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 4a1a86e37fd5..793a8307dded 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -15,6 +15,7 @@ struct ucsi; struct ucsi_altmode; +struct ucsi_connector; /* UCSI offsets (Bytes) */ #define UCSI_VERSION 0 @@ -62,6 +63,8 @@ int ucsi_register(struct ucsi *ucsi); void ucsi_unregister(struct ucsi *ucsi); void *ucsi_get_drvdata(struct ucsi *ucsi); void ucsi_set_drvdata(struct ucsi *ucsi, void *data); +bool ucsi_con_mutex_lock(struct ucsi_connector *con); +void ucsi_con_mutex_unlock(struct ucsi_connector *con); void ucsi_connector_change(struct ucsi *ucsi, u8 num); diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c index 8e500fe41e78..e690b6e53480 100644 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c @@ -585,6 +585,10 @@ static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset, uc->has_multiple_dp) { con_index = (uc->last_cmd_sent >> 16) & UCSI_CMD_CONNECTOR_MASK; + if (con_index == 0) { + ret = -EINVAL; + goto err_put; + } con = 
&uc->ucsi->connector[con_index - 1]; ucsi_ccg_update_set_new_cam_cmd(uc, con, (u64 *)val); } @@ -599,6 +603,7 @@ static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset, err_clear_bit: clear_bit(DEV_CMD_PENDING, &uc->flags); +err_put: pm_runtime_put_sync(uc->dev); mutex_unlock(&uc->lock); diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 113aac0446de..8cc84840efe8 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -166,9 +166,12 @@ again: klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start)); preve = dmr->end; } else { + u64 bcount = min_t(u64, dmr->start - preve, MAX_KLM_SIZE); + klm->key = cpu_to_be32(mvdev->res.null_mkey); - klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve)); - preve = dmr->start; + klm->bcount = cpu_to_be32(klm_bcount(bcount)); + preve += bcount; + goto again; } } @@ -227,21 +230,19 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr unsigned long lgcd = 0; int log_entity_size; unsigned long size; - u64 start = 0; int err; struct page *pg; unsigned int nsg; int sglen; - u64 pa; + u64 pa, offset; u64 paend; struct scatterlist *sg; struct device *dma = mvdev->vdev.dma_dev; for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1); - map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) { + map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) { size = maplen(map, mr); lgcd = gcd(lgcd, size); - start += size; } log_entity_size = ilog2(lgcd); @@ -255,8 +256,10 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr sg = mr->sg_head.sgl; for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1); map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) { - paend = map->addr + maplen(map, mr); - for (pa = map->addr; pa < paend; pa += sglen) { + offset = mr->start > map->start ? mr->start - map->start : 0; + pa = map->addr + offset; + paend = map->addr + offset + maplen(map, mr); + for (; pa < paend; pa += sglen) { pg = pfn_to_page(__phys_to_pfn(pa)); if (!sg) { mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n", diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index 281287fae89f..1d6d89c08e6e 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -591,7 +591,11 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto mdev_err; } - mdev_id = kzalloc(sizeof(struct virtio_device_id), GFP_KERNEL); + /* + * id_table should be a null terminated array, so allocate one additional + * entry here, see vdpa_mgmtdev_get_classes(). 
+ */ + mdev_id = kcalloc(2, sizeof(struct virtio_device_id), GFP_KERNEL); if (!mdev_id) { err = -ENOMEM; goto mdev_id_err; @@ -611,8 +615,8 @@ static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto probe_err; } - mdev_id->device = mdev->id.device; - mdev_id->vendor = mdev->id.vendor; + mdev_id[0].device = mdev->id.device; + mdev_id[0].vendor = mdev->id.vendor; mgtdev->id_table = mdev_id; mgtdev->max_supported_vqs = vp_modern_get_num_queues(mdev); mgtdev->supported_features = vp_modern_get_features(mdev); diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c index dfab5b742191..76ef63b940d9 100644 --- a/drivers/vfio/iova_bitmap.c +++ b/drivers/vfio/iova_bitmap.c @@ -126,7 +126,7 @@ struct iova_bitmap { static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap, unsigned long iova) { - unsigned long pgsize = 1 << bitmap->mapped.pgshift; + unsigned long pgsize = 1UL << bitmap->mapped.pgshift; return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize); } diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c index c604b70437a5..3f93b5c3f099 100644 --- a/drivers/vfio/pci/mlx5/cmd.c +++ b/drivers/vfio/pci/mlx5/cmd.c @@ -1106,7 +1106,8 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev, struct mlx5_vhca_qp *host_qp; struct mlx5_vhca_qp *fw_qp; struct mlx5_core_dev *mdev; - u32 max_msg_size = PAGE_SIZE; + u32 log_max_msg_size; + u32 max_msg_size; u64 rq_size = SZ_2M; u32 max_recv_wr; int err; @@ -1123,6 +1124,12 @@ int mlx5vf_start_page_tracker(struct vfio_device *vdev, } mdev = mvdev->mdev; + log_max_msg_size = MLX5_CAP_ADV_VIRTUALIZATION(mdev, pg_track_log_max_msg_size); + max_msg_size = (1ULL << log_max_msg_size); + /* The RQ must hold at least 4 WQEs/messages for successful QP creation */ + if (rq_size < 4 * max_msg_size) + rq_size = 4 * max_msg_size; + memset(tracker, 0, sizeof(*tracker)); tracker->uar = mlx5_get_uars_page(mdev); if (IS_ERR(tracker->uar)) { @@ -1212,25 +1219,41 @@ set_report_output(u32 size, int index, struct mlx5_vhca_qp *qp, { u32 entry_size = MLX5_ST_SZ_BYTES(page_track_report_entry); u32 nent = size / entry_size; + u32 nent_in_page; + u32 nent_to_set; struct page *page; + u32 page_offset; + u32 page_index; + u32 buf_offset; + void *kaddr; u64 addr; u64 *buf; int i; - if (WARN_ON(index >= qp->recv_buf.npages || + buf_offset = index * qp->max_msg_size; + if (WARN_ON(buf_offset + size >= qp->recv_buf.npages * PAGE_SIZE || (nent > qp->max_msg_size / entry_size))) return; - page = qp->recv_buf.page_list[index]; - buf = kmap_local_page(page); - for (i = 0; i < nent; i++) { - addr = MLX5_GET(page_track_report_entry, buf + i, - dirty_address_low); - addr |= (u64)MLX5_GET(page_track_report_entry, buf + i, - dirty_address_high) << 32; - iova_bitmap_set(dirty, addr, qp->tracked_page_size); - } - kunmap_local(buf); + do { + page_index = buf_offset / PAGE_SIZE; + page_offset = buf_offset % PAGE_SIZE; + nent_in_page = (PAGE_SIZE - page_offset) / entry_size; + page = qp->recv_buf.page_list[page_index]; + kaddr = kmap_local_page(page); + buf = kaddr + page_offset; + nent_to_set = min(nent, nent_in_page); + for (i = 0; i < nent_to_set; i++) { + addr = MLX5_GET(page_track_report_entry, buf + i, + dirty_address_low); + addr |= (u64)MLX5_GET(page_track_report_entry, buf + i, + dirty_address_high) << 32; + iova_bitmap_set(dirty, addr, qp->tracked_page_size); + } + kunmap_local(kaddr); + buf_offset += (nent_to_set * entry_size); + nent -= nent_to_set; + } while (nent); } static void diff --git 
a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c index 523e0144c86f..105243d83b2d 100644 --- a/drivers/vfio/pci/vfio_pci_config.c +++ b/drivers/vfio/pci/vfio_pci_config.c @@ -312,6 +312,10 @@ static int vfio_virt_config_read(struct vfio_pci_core_device *vdev, int pos, return count; } +static struct perm_bits direct_ro_perms = { + .readfn = vfio_direct_config_read, +}; + /* Default capability regions to read-only, no-virtualization */ static struct perm_bits cap_perms[PCI_CAP_ID_MAX + 1] = { [0 ... PCI_CAP_ID_MAX] = { .readfn = vfio_direct_config_read } @@ -1802,7 +1806,8 @@ int vfio_config_init(struct vfio_pci_core_device *vdev) cpu_to_le16(PCI_COMMAND_MEMORY); } - if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx) + if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx || + vdev->pdev->irq == IRQ_NOTCONNECTED) vconfig[PCI_INTERRUPT_PIN] = 0; ret = vfio_cap_init(vdev); @@ -1890,9 +1895,17 @@ static ssize_t vfio_config_do_rw(struct vfio_pci_core_device *vdev, char __user cap_start = *ppos; } else { if (*ppos >= PCI_CFG_SPACE_SIZE) { - WARN_ON(cap_id > PCI_EXT_CAP_ID_MAX); + /* + * We can get a cap_id that exceeds PCI_EXT_CAP_ID_MAX + * if we're hiding an unknown capability at the start + * of the extended capability list. Use default, ro + * access, which will virtualize the id and next values. + */ + if (cap_id > PCI_EXT_CAP_ID_MAX) + perm = &direct_ro_perms; + else + perm = &ecap_perms[cap_id]; - perm = &ecap_perms[cap_id]; cap_start = vfio_find_cap_start(vdev, *ppos); } else { WARN_ON(cap_id > PCI_CAP_ID_MAX); diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index f357fd157e1e..aa362b434413 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -719,15 +719,7 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_finish_enable); static int vfio_pci_get_irq_count(struct vfio_pci_core_device *vdev, int irq_type) { if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) { - u8 pin; - - if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || - vdev->nointx || vdev->pdev->is_virtfn) - return 0; - - pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin); - - return pin ? 1 : 0; + return vdev->vconfig[PCI_INTERRUPT_PIN] ? 
1 : 0; } else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) { u8 pos; u16 flags; diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index 5cbcde32ff79..64d78944efa5 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -207,7 +207,7 @@ static int vfio_intx_enable(struct vfio_pci_core_device *vdev, if (!is_irq_none(vdev)) return -EINVAL; - if (!pdev->irq) + if (!pdev->irq || pdev->irq == IRQ_NOTCONNECTED) return -ENODEV; name = kasprintf(GFP_KERNEL_ACCOUNT, "vfio-intx(%s)", pci_name(pdev)); diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index e27de61ac9fe..8191c8fcfb25 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "vfio_pci_priv.h" diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index 1a0a238ffa35..1820ed0f9e01 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -391,6 +391,11 @@ static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg, { unsigned int done = 0; + if (off >= reg->size) + return -EINVAL; + + count = min_t(size_t, count, reg->size - off); + if (!reg->ioaddr) { reg->ioaddr = ioremap(reg->addr, reg->size); @@ -470,6 +475,11 @@ static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg, { unsigned int done = 0; + if (off >= reg->size) + return -EINVAL; + + count = min_t(size_t, count, reg->size - off); + if (!reg->ioaddr) { reg->ioaddr = ioremap(reg->addr, reg->size); diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c index f54d256e2d54..589dae9ebb63 100644 --- a/drivers/video/backlight/led_bl.c +++ b/drivers/video/backlight/led_bl.c @@ -217,7 +217,7 @@ static int led_bl_probe(struct platform_device *pdev) return 0; } -static int led_bl_remove(struct platform_device *pdev) +static void led_bl_remove(struct platform_device *pdev) { struct led_bl_data *priv = platform_get_drvdata(pdev); struct backlight_device *bl = priv->bl_dev; @@ -226,10 +226,11 @@ static int led_bl_remove(struct platform_device *pdev) backlight_device_unregister(bl); led_bl_power_off(priv); - for (i = 0; i < priv->nb_leds; i++) + for (i = 0; i < priv->nb_leds; i++) { + mutex_lock(&priv->leds[i]->led_access); led_sysfs_enable(priv->leds[i]); - - return 0; + mutex_unlock(&priv->leds[i]->led_access); + } } static const struct of_device_id led_bl_of_match[] = { @@ -245,7 +246,7 @@ static struct platform_driver led_bl_driver = { .of_match_table = of_match_ptr(led_bl_of_match), }, .probe = led_bl_probe, - .remove = led_bl_remove, + .remove_new = led_bl_remove, }; module_platform_driver(led_bl_driver); diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 22cea5082ac4..5c4df3aaab1e 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -24,7 +24,7 @@ config VGA_CONSOLE Say Y.
config MDA_CONSOLE - depends on !M68K && !PARISC && ISA + depends on VGA_CONSOLE && ISA tristate "MDA text console (dual-headed)" help Say Y here if you have an old MDA or monochrome Hercules graphics diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index 519313b8bb00..ab7f311d1b56 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -137,13 +137,15 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi) */ int au1100fb_setmode(struct au1100fb_device *fbdev) { - struct fb_info *info = &fbdev->info; + struct fb_info *info; u32 words; int index; if (!fbdev) return -EINVAL; + info = &fbdev->info; + /* Update var-dependent FB info */ if (panel_is_active(fbdev->panel) || panel_is_color(fbdev->panel)) { if (info->var.bits_per_pixel <= 8) { diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index 8587c9da0670..42e681a78136 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -59,12 +59,11 @@ static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy, } static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width) + int sx, int height, int width, int fg, int bg) { - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; struct fb_fillrect region; - region.color = attr_bgcol_ec(bgshift, vc, info); + region.color = bg; region.dx = sx * vc->vc_font.width; region.dy = sy * vc->vc_font.height; region.width = width * vc->vc_font.width; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index e6640edec155..538e932055ca 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1240,7 +1240,7 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, { struct fb_info *info = fbcon_info_from_console(vc->vc_num); struct fbcon_ops *ops = info->fbcon_par; - + int fg, bg; struct fbcon_display *p = &fb_display[vc->vc_num]; u_int y_break; @@ -1261,16 +1261,18 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height, fbcon_clear_margins(vc, 0); } + fg = get_color(vc, info, vc->vc_video_erase_char, 1); + bg = get_color(vc, info, vc->vc_video_erase_char, 0); /* Split blits that cross physical y_wrap boundary */ y_break = p->vrows - p->yscroll; if (sy < y_break && sy + height - 1 >= y_break) { u_int b = y_break - sy; - ops->clear(vc, info, real_y(p, sy), sx, b, width); + ops->clear(vc, info, real_y(p, sy), sx, b, width, fg, bg); ops->clear(vc, info, real_y(p, sy + b), sx, height - b, - width); + width, fg, bg); } else - ops->clear(vc, info, real_y(p, sy), sx, height, width); + ops->clear(vc, info, real_y(p, sy), sx, height, width, fg, bg); } static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 0eaf54a21151..25691d4b027b 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -55,7 +55,7 @@ struct fbcon_ops { void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy, int sx, int dy, int dx, int height, int width); void (*clear)(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width); + int sx, int height, int width, int fb, int bg); void (*putcs)(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx, int fg, int bg); @@ -116,42 +116,6 @@ static inline int mono_col(const struct fb_info *info) return (~(0xfff << max_len)) & 0xff; } 
-static inline int attr_col_ec(int shift, struct vc_data *vc, - struct fb_info *info, int is_fg) -{ - int is_mono01; - int col; - int fg; - int bg; - - if (!vc) - return 0; - - if (vc->vc_can_do_color) - return is_fg ? attr_fgcol(shift,vc->vc_video_erase_char) - : attr_bgcol(shift,vc->vc_video_erase_char); - - if (!info) - return 0; - - col = mono_col(info); - is_mono01 = info->fix.visual == FB_VISUAL_MONO01; - - if (attr_reverse(vc->vc_video_erase_char)) { - fg = is_mono01 ? col : 0; - bg = is_mono01 ? 0 : col; - } - else { - fg = is_mono01 ? 0 : col; - bg = is_mono01 ? col : 0; - } - - return is_fg ? fg : bg; -} - -#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) -#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) - /* * Scroll Method */ diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index 2789ace79634..9f4d65478554 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -78,14 +78,13 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, } static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width) + int sx, int height, int width, int fg, int bg) { struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; u32 vyres = GETVYRES(ops->p, info); - region.color = attr_bgcol_ec(bgshift,vc,info); + region.color = bg; region.dx = sy * vc->vc_font.height; region.dy = vyres - ((sx + width) * vc->vc_font.width); region.height = width * vc->vc_font.width; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index 86a254c1b2b7..b18e31886da1 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -63,14 +63,13 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, } static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width) + int sx, int height, int width, int fg, int bg) { struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; u32 vxres = GETVXRES(ops->p, info); - region.color = attr_bgcol_ec(bgshift,vc,info); + region.color = bg; region.dx = vxres - ((sy + height) * vc->vc_font.height); region.dy = sx * vc->vc_font.width; region.height = width * vc->vc_font.width; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 23bc045769d0..b6b074cfd9dc 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -64,15 +64,14 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, } static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width) + int sx, int height, int width, int fg, int bg) { struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; - int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; u32 vyres = GETVYRES(ops->p, info); u32 vxres = GETVXRES(ops->p, info); - region.color = attr_bgcol_ec(bgshift,vc,info); + region.color = bg; region.dy = vyres - ((sy + height) * vc->vc_font.height); region.dx = vxres - ((sx + width) * vc->vc_font.width); region.width = width * vc->vc_font.width; diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 2768eff247ba..b3aa0c6620c7 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -32,16 +32,14 @@ static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy, } static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy, - int sx, int height, int width) + int sx, int height, int width, int fg, int bg) { struct fb_tilerect rect; - int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - int fgshift = (vc->vc_hi_font_mask) ? 9 : 8; rect.index = vc->vc_video_erase_char & ((vc->vc_hi_font_mask) ? 0x1ff : 0xff); - rect.fg = attr_fgcol_ec(fgshift, vc, info); - rect.bg = attr_bgcol_ec(bgshift, vc, info); + rect.fg = fg; + rect.bg = bg; rect.sx = sx; rect.sy = sy; rect.width = width; @@ -76,7 +74,42 @@ static void tile_putcs(struct vc_data *vc, struct fb_info *info, static void tile_clear_margins(struct vc_data *vc, struct fb_info *info, int color, int bottom_only) { - return; + unsigned int cw = vc->vc_font.width; + unsigned int ch = vc->vc_font.height; + unsigned int rw = info->var.xres - (vc->vc_cols*cw); + unsigned int bh = info->var.yres - (vc->vc_rows*ch); + unsigned int rs = info->var.xres - rw; + unsigned int bs = info->var.yres - bh; + unsigned int vwt = info->var.xres_virtual / cw; + unsigned int vht = info->var.yres_virtual / ch; + struct fb_tilerect rect; + + rect.index = vc->vc_video_erase_char & + ((vc->vc_hi_font_mask) ? 
0x1ff : 0xff); + rect.fg = color; + rect.bg = color; + + if ((int) rw > 0 && !bottom_only) { + rect.sx = (info->var.xoffset + rs + cw - 1) / cw; + rect.sy = 0; + rect.width = (rw + cw - 1) / cw; + rect.height = vht; + if (rect.width + rect.sx > vwt) + rect.width = vwt - rect.sx; + if (rect.sx < vwt) + info->tileops->fb_tilefill(info, &rect); + } + + if ((int) bh > 0) { + rect.sx = info->var.xoffset / cw; + rect.sy = (info->var.yoffset + bs) / ch; + rect.width = rs / cw; + rect.height = (bh + ch - 1) / ch; + if (rect.height + rect.sy > vht) + rect.height = vht - rect.sy; + if (rect.sy < vht) + info->tileops->fb_tilefill(info, &rect); + } } static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode, diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c index 16c1aaae9afa..53944bcc990c 100644 --- a/drivers/video/fbdev/efifb.c +++ b/drivers/video/fbdev/efifb.c @@ -570,15 +570,10 @@ static int efifb_probe(struct platform_device *dev) break; } - err = sysfs_create_groups(&dev->dev.kobj, efifb_groups); - if (err) { - pr_err("efifb: cannot add sysfs attrs\n"); - goto err_unmap; - } err = fb_alloc_cmap(&info->cmap, 256, 0); if (err < 0) { pr_err("efifb: cannot allocate colormap\n"); - goto err_groups; + goto err_unmap; } if (efifb_pci_dev) @@ -597,8 +592,6 @@ err_put_rpm_ref: pm_runtime_put(&efifb_pci_dev->dev); fb_dealloc_cmap(&info->cmap); -err_groups: - sysfs_remove_groups(&dev->dev.kobj, efifb_groups); err_unmap: if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC)) iounmap(info->screen_base); @@ -618,7 +611,6 @@ static int efifb_remove(struct platform_device *pdev) /* efifb_destroy takes care of info cleanup */ unregister_framebuffer(info); - sysfs_remove_groups(&pdev->dev.kobj, efifb_groups); return 0; } @@ -626,6 +618,7 @@ static int efifb_remove(struct platform_device *pdev) static struct platform_driver efifb_driver = { .driver = { .name = "efi-framebuffer", + .dev_groups = efifb_groups, }, .probe = efifb_probe, .remove = efifb_remove, diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index ce3c5b0b8f4e..53be4ab374cc 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -1829,6 +1829,7 @@ static int fsl_diu_remove(struct platform_device *pdev) int i; data = dev_get_drvdata(&pdev->dev); + device_remove_file(&pdev->dev, &data->dev_attr); disable_lcdc(&data->fsl_diu_info[0]); free_irq(data->irq, data->diu_reg); diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index d3d643cf7506..41c496ff55cc 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -1106,7 +1106,7 @@ static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info) if (par->need_docopy) { vfree(par->dio_vp); - iounmap(info->screen_base); + iounmap(par->mmio_vp); vmbus_free_mmio(par->mem->start, screen_fb_size); } else { hvfb_release_phymem(hdev, info->fix.smem_start, diff --git a/drivers/video/fbdev/omap/lcd_dma.c b/drivers/video/fbdev/omap/lcd_dma.c index f85817635a8c..0da23c57e475 100644 --- a/drivers/video/fbdev/omap/lcd_dma.c +++ b/drivers/video/fbdev/omap/lcd_dma.c @@ -432,8 +432,8 @@ static int __init omap_init_lcd_dma(void) spin_lock_init(&lcd_dma.lock); - r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, - "LCD DMA", NULL); + r = request_threaded_irq(INT_DMA_LCD, NULL, lcd_dma_irq_handler, + IRQF_ONESHOT, "LCD DMA", NULL); if (r != 0) pr_err("unable to request IRQ for LCD DMA (error %d)\n", r); diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c 
b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c index 92fb6b7e1f68..a6225f962190 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c @@ -2749,9 +2749,13 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi, bool mem_to_mem) { int r; - enum omap_overlay_caps caps = dss_feat_get_overlay_caps(plane); + enum omap_overlay_caps caps; enum omap_channel channel; + if (plane == OMAP_DSS_WB) + return -EINVAL; + + caps = dss_feat_get_overlay_caps(plane); channel = dispc_ovl_get_channel_out(plane); DSSDBG("dispc_ovl_setup %d, pa %pad, pa_uv %pad, sw %d, %d,%d, %dx%d ->" diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c index 0282d4eef139..3b16c3342cb7 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c @@ -102,6 +102,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port) np = of_get_next_parent(np); } + of_node_put(np); return NULL; } diff --git a/drivers/video/fbdev/sh7760fb.c b/drivers/video/fbdev/sh7760fb.c index 5978a8921232..62e28d315d81 100644 --- a/drivers/video/fbdev/sh7760fb.c +++ b/drivers/video/fbdev/sh7760fb.c @@ -359,7 +359,7 @@ static void sh7760fb_free_mem(struct fb_info *info) if (!info->screen_base) return; - dma_free_coherent(info->dev, info->screen_size, + dma_free_coherent(info->device, info->screen_size, info->screen_base, par->fbdma); par->fbdma = 0; @@ -408,14 +408,13 @@ static int sh7760fb_alloc_mem(struct fb_info *info) if (vram < PAGE_SIZE) vram = PAGE_SIZE; - fbmem = dma_alloc_coherent(info->dev, vram, &par->fbdma, GFP_KERNEL); - + fbmem = dma_alloc_coherent(info->device, vram, &par->fbdma, GFP_KERNEL); if (!fbmem) return -ENOMEM; if ((par->fbdma & SH7760FB_DMA_MASK) != SH7760FB_DMA_MASK) { - sh7760fb_free_mem(info); - dev_err(info->dev, "kernel gave me memory at 0x%08lx, which is" + dma_free_coherent(info->device, vram, fbmem, par->fbdma); + dev_err(info->device, "kernel gave me memory at 0x%08lx, which is" "unusable for the LCDC\n", (unsigned long)par->fbdma); return -ENOMEM; } @@ -486,7 +485,7 @@ static int sh7760fb_probe(struct platform_device *pdev) ret = sh7760fb_alloc_mem(info); if (ret) { - dev_dbg(info->dev, "framebuffer memory allocation failed!\n"); + dev_dbg(info->device, "framebuffer memory allocation failed!\n"); goto out_unmap; } diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index f743bfbde2a6..bbbc9de06a81 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -326,6 +326,13 @@ static int sm501fb_check_var(struct fb_var_screeninfo *var, if (var->xres_virtual > 4096 || var->yres_virtual > 2048) return -EINVAL; + /* geometry sanity checks */ + if (var->xres + var->xoffset > var->xres_virtual) + return -EINVAL; + + if (var->yres + var->yoffset > var->yres_virtual) + return -EINVAL; + /* can cope with 8,16 or 32bpp */ if (var->bits_per_pixel <= 8) diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c index 423ea888d79a..92730c08fd79 100644 --- a/drivers/virt/acrn/hsm.c +++ b/drivers/virt/acrn/hsm.c @@ -49,7 +49,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr) switch (cmd & PMCMD_TYPE_MASK) { case ACRN_PMCMD_GET_PX_CNT: case ACRN_PMCMD_GET_CX_CNT: - pm_info = kmalloc(sizeof(u64), GFP_KERNEL); + pm_info = kzalloc(sizeof(u64), GFP_KERNEL); if (!pm_info) return -ENOMEM; @@ -64,7 +64,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr) kfree(pm_info); break; case 
ACRN_PMCMD_GET_PX_DATA: - px_data = kmalloc(sizeof(*px_data), GFP_KERNEL); + px_data = kzalloc(sizeof(*px_data), GFP_KERNEL); if (!px_data) return -ENOMEM; @@ -79,7 +79,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr) kfree(px_data); break; case ACRN_PMCMD_GET_CX_DATA: - cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL); + cx_data = kzalloc(sizeof(*cx_data), GFP_KERNEL); if (!cx_data) return -ENOMEM; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 7d320f799ca1..06a64c4adc98 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -2415,7 +2415,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) struct vring_virtqueue *vq = to_vvq(_vq); if (vq->event_triggered) - vq->event_triggered = false; + data_race(vq->event_triggered = false); return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) : virtqueue_enable_cb_delayed_split(_vq); diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c index 16aca21f13d6..a5af532c4086 100644 --- a/drivers/watchdog/apple_wdt.c +++ b/drivers/watchdog/apple_wdt.c @@ -130,7 +130,7 @@ static int apple_wdt_restart(struct watchdog_device *wdd, unsigned long mode, * can take up to ~20-25ms until the SoC is actually reset. Just wait * 50ms here to be safe. */ - (void)readl_relaxed(wdt->regs + APPLE_WDT_WD1_CUR_TIME); + (void)readl(wdt->regs + APPLE_WDT_WD1_CUR_TIME); mdelay(50); return 0; diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index e937b4dd28be..35ae40b35f55 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -82,6 +82,13 @@ #define TCO2_CNT(p) (TCOBASE(p) + 0x0a) /* TCO2 Control Register */ #define TCOv2_TMR(p) (TCOBASE(p) + 0x12) /* TCOv2 Timer Initial Value*/ +/* + * NMI_NOW is bit 8 of TCO1_CNT register + * Read/Write + * This bit is implemented as RW but has no effect on HW. + */ +#define NMI_NOW BIT(8) + /* internal variables */ struct iTCO_wdt_private { struct watchdog_device wddev; @@ -219,13 +226,23 @@ static int update_no_reboot_bit_cnt(void *priv, bool set) struct iTCO_wdt_private *p = priv; u16 val, newval; - val = inw(TCO1_CNT(p)); + /* + * writing back 1b1 to NMI_NOW of TCO1_CNT register + * causes NMI_NOW bit inversion what consequently does + * not allow to perform the register's value comparison + * properly. + * + * NMI_NOW bit masking for TCO1_CNT register values + * helps to avoid possible NMI_NOW bit inversions on + * following write operation. + */ + val = inw(TCO1_CNT(p)) & ~NMI_NOW; if (set) val |= BIT(0); else val &= ~BIT(0); outw(val, TCO1_CNT(p)); - newval = inw(TCO1_CNT(p)); + newval = inw(TCO1_CNT(p)) & ~NMI_NOW; /* make sure the update is successful */ return val != newval ? 
-EIO : 0; diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c index 843f9f8e3917..239947df613d 100644 --- a/drivers/watchdog/it87_wdt.c +++ b/drivers/watchdog/it87_wdt.c @@ -20,6 +20,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include #include #include #include @@ -40,6 +42,7 @@ #define VAL 0x2f /* Logical device Numbers LDN */ +#define EC 0x04 #define GPIO 0x07 /* Configuration Registers and Functions */ @@ -71,6 +74,12 @@ #define IT8784_ID 0x8784 #define IT8786_ID 0x8786 +/* Environment Controller Configuration Registers LDN=0x04 */ +#define SCR1 0xfa + +/* Environment Controller Bits SCR1 */ +#define WDT_PWRGD 0x20 + /* GPIO Configuration Registers LDN=0x07 */ #define WDTCTRL 0x71 #define WDTCFG 0x72 @@ -233,6 +242,21 @@ static int wdt_set_timeout(struct watchdog_device *wdd, unsigned int t) return ret; } +enum { + IT87_WDT_OUTPUT_THROUGH_PWRGD = BIT(0), +}; + +static const struct dmi_system_id it87_quirks[] = { + { + /* Qotom Q30900P (IT8786) */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_NAME, "QCML04"), + }, + .driver_data = (void *)IT87_WDT_OUTPUT_THROUGH_PWRGD, + }, + {} +}; + static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .firmware_version = 1, @@ -254,8 +278,10 @@ static struct watchdog_device wdt_dev = { static int __init it87_wdt_init(void) { + const struct dmi_system_id *dmi_id; u8 chip_rev; u8 ctrl; + int quirks = 0; int rc; rc = superio_enter(); @@ -266,6 +292,10 @@ static int __init it87_wdt_init(void) chip_rev = superio_inb(CHIPREV) & 0x0f; superio_exit(); + dmi_id = dmi_first_match(it87_quirks); + if (dmi_id) + quirks = (long)dmi_id->driver_data; + switch (chip_type) { case IT8702_ID: max_units = 255; @@ -326,6 +356,15 @@ static int __init it87_wdt_init(void) superio_outb(0x00, WDTCTRL); } + if (quirks & IT87_WDT_OUTPUT_THROUGH_PWRGD) { + superio_select(EC); + ctrl = superio_inb(SCR1); + if (!(ctrl & WDT_PWRGD)) { + ctrl |= WDT_PWRGD; + superio_outb(ctrl, SCR1); + } + } + superio_exit(); if (timeout < 1 || timeout > max_units * 60) { diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index e97787536792..08763ff950c6 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c @@ -175,9 +175,15 @@ static int mtk_wdt_restart(struct watchdog_device *wdt_dev, { struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); void __iomem *wdt_base; + u32 reg; wdt_base = mtk_wdt->wdt_base; + /* Enable reset in order to issue a system reset instead of an IRQ */ + reg = readl(wdt_base + WDT_MODE); + reg &= ~WDT_MODE_IRQ_EN; + writel(reg | WDT_MODE_KEY, wdt_base + WDT_MODE); + while (1) { writel(WDT_SWRST_KEY, wdt_base + WDT_SWRST); mdelay(5); diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c index fe27039f6f5a..8a09d26cfe10 100644 --- a/drivers/watchdog/rti_wdt.c +++ b/drivers/watchdog/rti_wdt.c @@ -54,7 +54,7 @@ #define MAX_HW_ERROR 250 -static int heartbeat = DEFAULT_HEARTBEAT; +static int heartbeat; /* * struct to hold data for each WDT device @@ -240,6 +240,7 @@ static int rti_wdt_probe(struct platform_device *pdev) wdd->min_timeout = 1; wdd->max_hw_heartbeat_ms = (WDT_PRELOAD_MAX << WDT_PRELOAD_SHIFT) / wdt->freq * 1000; + wdd->timeout = DEFAULT_HEARTBEAT; wdd->parent = dev; watchdog_set_drvdata(wdd, wdt); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index d5d7c402b651..ab135c3e4341 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -271,7 +271,7 @@ config XEN_PRIVCMD config XEN_ACPI_PROCESSOR tristate "Xen 
ACPI processor" - depends on XEN && XEN_PV_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ + depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ default m help This ACPI processor uploads Power Management information to the Xen diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index 544d3f9010b9..1db82da56db6 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -26,6 +26,8 @@ #define DRV_NAME "xen-platform-pci" +#define PCI_DEVICE_ID_XEN_PLATFORM_XS61 0x0002 + static unsigned long platform_mmio; static unsigned long platform_mmio_alloc; static unsigned long platform_mmiolen; @@ -174,6 +176,8 @@ pci_out: static const struct pci_device_id platform_pci_tbl[] = { {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM_XS61, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {0,} }; diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 0451e6ebc21a..0893c1012de6 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -74,19 +74,21 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev, return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr)); } +static inline bool range_requires_alignment(phys_addr_t p, size_t size) +{ + phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT); + phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT; + + return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn); +} + static inline int range_straddles_page_boundary(phys_addr_t p, size_t size) { unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p); unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size); - phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT); next_bfn = pfn_to_bfn(xen_pfn); - /* If buffer is physically aligned, ensure DMA alignment. 
*/ - if (IS_ALIGNED(p, algn) && - !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn)) - return 1; - for (i = 1; i < nr_pages; i++) if (pfn_to_bfn(++xen_pfn) != ++next_bfn) return 1; @@ -155,7 +157,8 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size, *dma_handle = xen_phys_to_dma(dev, phys); if (*dma_handle + size - 1 > dma_mask || - range_straddles_page_boundary(phys, size)) { + range_straddles_page_boundary(phys, size) || + range_requires_alignment(phys, size)) { if (xen_create_contiguous_region(phys, order, fls64(dma_mask), dma_handle) != 0) goto out_free_pages; @@ -181,7 +184,8 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr, size = ALIGN(size, XEN_PAGE_SIZE); if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) || - WARN_ON_ONCE(range_straddles_page_boundary(phys, size))) + WARN_ON_ONCE(range_straddles_page_boundary(phys, size) || + range_requires_alignment(phys, size))) return; if (TestClearPageXenRemapped(virt_to_page(vaddr))) diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index 2754bdfadcb8..4ba73320694a 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h @@ -77,6 +77,7 @@ enum xb_req_state { struct xb_req_data { struct list_head list; wait_queue_head_t wq; + struct kref kref; struct xsd_sockmsg msg; uint32_t caller_req_id; enum xsd_sockmsg_type type; @@ -103,6 +104,7 @@ int xb_init_comms(void); void xb_deinit_comms(void); int xs_watch_msg(struct xs_watch_event *event); void xs_request_exit(struct xb_req_data *req); +void xs_free_req(struct kref *kref); int xenbus_match(struct device *_dev, struct device_driver *_drv); int xenbus_dev_probe(struct device *_dev); diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index e5fda0256feb..82df2da1b880 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c @@ -309,8 +309,8 @@ static int process_msg(void) virt_wmb(); req->state = xb_req_state_got_reply; req->cb(req); - } else - kfree(req); + } + kref_put(&req->kref, xs_free_req); } mutex_unlock(&xs_response_mutex); @@ -386,14 +386,13 @@ static int process_writes(void) state.req->msg.type = XS_ERROR; state.req->err = err; list_del(&state.req->list); - if (state.req->state == xb_req_state_aborted) - kfree(state.req); - else { + if (state.req->state != xb_req_state_aborted) { /* write err, then update state */ virt_wmb(); state.req->state = xb_req_state_got_reply; wake_up(&state.req->wq); } + kref_put(&state.req->kref, xs_free_req); mutex_unlock(&xb_write_mutex); diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c index 0792fda49a15..c495cff3da30 100644 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c @@ -406,7 +406,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req) mutex_unlock(&u->reply_mutex); kfree(req->body); - kfree(req); + kref_put(&req->kref, xs_free_req); kref_put(&u->kref, xenbus_file_free); diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 1a9ded0cddcb..d3b6908110c6 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -313,7 +313,7 @@ int xenbus_dev_probe(struct device *_dev) if (err) { dev_warn(&dev->dev, "watch_otherend on %s failed.\n", dev->nodename); - return err; + goto fail_remove; } dev->spurious_threshold = 1; @@ -322,6 +322,12 @@ int xenbus_dev_probe(struct device *_dev) dev->nodename); return 0; +fail_remove: + if (drv->remove) { + 
down(&dev->reclaim_sem); + drv->remove(dev); + up(&dev->reclaim_sem); + } fail_put: module_put(drv->driver.owner); fail: @@ -960,9 +966,15 @@ static int __init xenbus_init(void) if (xen_pv_domain()) xen_store_domain_type = XS_PV; if (xen_hvm_domain()) + { xen_store_domain_type = XS_HVM; - if (xen_hvm_domain() && xen_initial_domain()) - xen_store_domain_type = XS_LOCAL; + err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); + if (err) + goto out_error; + xen_store_evtchn = (int)v; + if (!v && xen_initial_domain()) + xen_store_domain_type = XS_LOCAL; + } if (xen_pv_domain() && !xen_start_info->store_evtchn) xen_store_domain_type = XS_LOCAL; if (xen_pv_domain() && xen_start_info->store_evtchn) @@ -981,10 +993,6 @@ static int __init xenbus_init(void) xen_store_interface = gfn_to_virt(xen_store_gfn); break; case XS_HVM: - err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); - if (err) - goto out_error; - xen_store_evtchn = (int)v; err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); if (err) goto out_error; diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 12e02eb01f59..a4dd92719e7a 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -112,6 +112,12 @@ static void xs_suspend_exit(void) wake_up_all(&xs_state_enter_wq); } +void xs_free_req(struct kref *kref) +{ + struct xb_req_data *req = container_of(kref, struct xb_req_data, kref); + kfree(req); +} + static uint32_t xs_request_enter(struct xb_req_data *req) { uint32_t rq_id; @@ -237,6 +243,12 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg) req->caller_req_id = req->msg.req_id; req->msg.req_id = xs_request_enter(req); + /* + * Take 2nd ref. One for this thread, and the second for the + * xenbus_thread. + */ + kref_get(&req->kref); + mutex_lock(&xb_write_mutex); list_add_tail(&req->list, &xb_write_list); notify = list_is_singular(&xb_write_list); @@ -261,8 +273,8 @@ static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg) if (req->state == xb_req_state_queued || req->state == xb_req_state_wait_reply) req->state = xb_req_state_aborted; - else - kfree(req); + + kref_put(&req->kref, xs_free_req); mutex_unlock(&xb_write_mutex); return ret; @@ -291,6 +303,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par) req->cb = xenbus_dev_queue_reply; req->par = par; req->user_req = true; + kref_init(&req->kref); xs_send(req, msg); @@ -319,6 +332,7 @@ static void *xs_talkv(struct xenbus_transaction t, req->num_vecs = num_vecs; req->cb = xs_wake_up; req->user_req = false; + kref_init(&req->kref); msg.req_id = 0; msg.tx_id = t.id; diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c index c6c73a33c44d..18bc11756ad3 100644 --- a/drivers/xen/xenfs/xensyms.c +++ b/drivers/xen/xenfs/xensyms.c @@ -48,7 +48,7 @@ static int xensyms_next_sym(struct xensyms *xs) return -ENOMEM; set_xen_guest_handle(symdata->name, xs->name); - symdata->symnum--; /* Rewind */ + symdata->symnum = symnum; /* Rewind */ ret = HYPERVISOR_platform_op(&xs->op); if (ret < 0) @@ -78,7 +78,7 @@ static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos) { struct xensyms *xs = (struct xensyms *)m->private; - xs->op.u.symdata.symnum = ++(*pos); + *pos = xs->op.u.symdata.symnum; if (xensyms_next_sym(xs)) return NULL; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 8f287009545c..495631eba3a6 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -392,17 +392,20 @@ void v9fs_evict_inode(struct inode *inode) struct v9fs_inode *v9inode = 
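The xenbus changes above convert struct xb_req_data to reference counting, so the requesting thread and the xenbus thread each drop their own reference instead of guessing who should kfree() the request. A small sketch of that two-owner pattern, using a plain C11 atomic counter in place of struct kref; all names here are made up for illustration.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
    atomic_int ref;
    int id;
};

static struct req *req_alloc(int id)
{
    struct req *r = malloc(sizeof(*r));
    atomic_init(&r->ref, 1);        /* like kref_init(): caller owns one ref */
    r->id = id;
    return r;
}

static void req_get(struct req *r) { atomic_fetch_add(&r->ref, 1); }

static void req_put(struct req *r)
{
    /* Last reference frees the object, like kref_put(..., xs_free_req). */
    if (atomic_fetch_sub(&r->ref, 1) == 1) {
        printf("freeing req %d\n", r->id);
        free(r);
    }
}

int main(void)
{
    struct req *r = req_alloc(7);

    req_get(r);   /* second ref: one for this thread, one for the responder */
    req_put(r);   /* responder thread done with the request */
    req_put(r);   /* requester done waiting -> freed exactly once */
    return 0;
}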
V9FS_I(inode); __le32 version; - truncate_inode_pages_final(&inode->i_data); - version = cpu_to_le32(v9inode->qid.version); - fscache_clear_inode_writeback(v9fs_inode_cookie(v9inode), inode, + if (!is_bad_inode(inode)) { + truncate_inode_pages_final(&inode->i_data); + version = cpu_to_le32(v9inode->qid.version); + fscache_clear_inode_writeback(v9fs_inode_cookie(v9inode), inode, &version); - clear_inode(inode); - filemap_fdatawrite(&inode->i_data); - - fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false); - /* clunk the fid stashed in writeback_fid */ - p9_fid_put(v9inode->writeback_fid); - v9inode->writeback_fid = NULL; + clear_inode(inode); + filemap_fdatawrite(&inode->i_data); + if (v9fs_inode_cookie(v9inode)) + fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false); + /* clunk the fid stashed in writeback_fid */ + p9_fid_put(v9inode->writeback_fid); + v9inode->writeback_fid = NULL; + } else + clear_inode(inode); } static int v9fs_test_inode(struct inode *inode, void *data) diff --git a/fs/Kconfig b/fs/Kconfig index 937ee6de75a5..314e4886517e 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -347,6 +347,7 @@ config GRACE_PERIOD config LOCKD tristate depends on FILE_LOCKING + select CRC32 select GRACE_PERIOD config LOCKD_V4 diff --git a/fs/affs/file.c b/fs/affs/file.c index 8daeed31e1af..52d11931025c 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -595,7 +595,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize) BUG_ON(tmp > bsize); AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); affs_fix_checksum(sb, bh); bh->b_state &= ~(1UL << BH_New); @@ -724,7 +724,8 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, tmp = min(bsize - boff, to - from); BUG_ON(boff + tmp > bsize || tmp > bsize); memcpy(AFFS_DATA(bh) + boff, data + from, tmp); - be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp); + AFFS_DATA_HEAD(bh)->size = cpu_to_be32( + max(boff + tmp, be32_to_cpu(AFFS_DATA_HEAD(bh)->size))); affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); written += tmp; @@ -746,7 +747,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(bsize); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); @@ -780,7 +781,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, if (buffer_new(bh)) { AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); - AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); + AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx + 1); AFFS_DATA_HEAD(bh)->size = cpu_to_be32(tmp); AFFS_DATA_HEAD(bh)->next = 0; bh->b_state &= ~(1UL << BH_New); diff --git a/fs/afs/afs.h b/fs/afs/afs.h index 432cb4b23961..3ea5f3e3c922 100644 --- a/fs/afs/afs.h +++ b/fs/afs/afs.h @@ -10,7 +10,7 @@ #include -#define AFS_MAXCELLNAME 256 /* Maximum length of a cell name */ +#define AFS_MAXCELLNAME 253 /* Maximum length of a cell name (DNS limited) */ #define AFS_MAXVOLNAME 64 /* Maximum length of a volume name */ #define AFS_MAXNSERVERS 8 /* Maximum servers in a basic volume record */ 
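In the affs_write_end_ofs() hunk above, the stored per-block size now takes the maximum of the old value and the end of the current copy (boff + tmp) instead of accumulating with be32_add_cpu(). A short model of the arithmetic, with assumed offsets, shows why accumulation over-counts when part of a block is rewritten.

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    unsigned int size = 0;      /* bytes of valid data stored in the block */
    unsigned int boff, tmp;

    /* First write fills bytes 0..99 of the block. */
    boff = 0; tmp = 100;
    size = MAX(boff + tmp, size);           /* 100 */

    /* Rewriting bytes 40..59 must not grow the block... */
    boff = 40; tmp = 20;
    size = MAX(boff + tmp, size);           /* still 100, not 120 */

    /* ...while extending past the old end does. */
    boff = 100; tmp = 50;
    size = MAX(boff + tmp, size);           /* 150 */

    printf("%u\n", size);
    return 0;
}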
#define AFS_NMAXNSERVERS 13 /* Maximum servers in a N/U-class volume record */ diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h index 9c65ffb8a523..8da0899fbc08 100644 --- a/fs/afs/afs_vl.h +++ b/fs/afs/afs_vl.h @@ -13,6 +13,7 @@ #define AFS_VL_PORT 7003 /* volume location service port */ #define VL_SERVICE 52 /* RxRPC service ID for the Volume Location service */ #define YFS_VL_SERVICE 2503 /* Service ID for AuriStor upgraded VL service */ +#define YFS_VL_MAXCELLNAME 256 /* Maximum length of a cell name in YFS protocol */ enum AFSVL_Operations { VLGETENTRYBYID = 503, /* AFS Get VLDB entry by ID */ diff --git a/fs/afs/cell.c b/fs/afs/cell.c index 926cb1188eba..7c0dce8eecad 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c @@ -161,6 +161,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, refcount_set(&cell->ref, 1); atomic_set(&cell->active, 0); INIT_WORK(&cell->manager, afs_manage_cell_work); + spin_lock_init(&cell->vs_lock); cell->volumes = RB_ROOT; INIT_HLIST_HEAD(&cell->proc_volumes); seqlock_init(&cell->volume_lock); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 38d5260c4614..cb537c669a8e 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -1457,7 +1457,12 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) op->file[1].vnode = vnode; } - return afs_do_sync_operation(op); + ret = afs_do_sync_operation(op); + + /* Not all systems that can host afs servers have ENOTEMPTY. */ + if (ret == -EEXIST) + ret = -ENOTEMPTY; + return ret; error: return afs_put_operation(op); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 097d5a5f07b1..fd4310272ccc 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -378,6 +378,7 @@ struct afs_cell { unsigned int debug_id; /* The volumes belonging to this cell */ + spinlock_t vs_lock; /* Lock for server->volumes */ struct rb_root volumes; /* Tree of volumes on this server */ struct hlist_head proc_volumes; /* procfs volume list */ seqlock_t volume_lock; /* For volumes */ @@ -501,6 +502,7 @@ struct afs_server { struct hlist_node addr4_link; /* Link in net->fs_addresses4 */ struct hlist_node addr6_link; /* Link in net->fs_addresses6 */ struct hlist_node proc_link; /* Link in net->fs_proc */ + struct list_head volumes; /* RCU list of afs_server_entry objects */ struct work_struct initcb_work; /* Work for CB.InitCallBackState* */ struct afs_server *gc_next; /* Next server in manager's list */ time64_t unuse_time; /* Time at which last unused */ @@ -549,12 +551,14 @@ struct afs_server { */ struct afs_server_entry { struct afs_server *server; + struct afs_volume *volume; + struct list_head slink; /* Link in server->volumes */ }; struct afs_server_list { struct rcu_head rcu; - afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */ refcount_t usage; + bool attached; /* T if attached to servers */ unsigned char nr_servers; unsigned char preferred; /* Preferred server */ unsigned short vnovol_mask; /* Servers to be skipped due to VNOVOL */ @@ -567,10 +571,9 @@ struct afs_server_list { * Live AFS volume management. 
*/ struct afs_volume { - union { - struct rcu_head rcu; - afs_volid_t vid; /* volume ID */ - }; + struct rcu_head rcu; + afs_volid_t vid; /* The volume ID of this volume */ + afs_volid_t vids[AFS_MAXTYPES]; /* All associated volume IDs */ refcount_t ref; time64_t update_at; /* Time at which to next update */ struct afs_cell *cell; /* Cell to which belongs (pins ref) */ @@ -1450,10 +1453,14 @@ static inline struct afs_server_list *afs_get_serverlist(struct afs_server_list } extern void afs_put_serverlist(struct afs_net *, struct afs_server_list *); -extern struct afs_server_list *afs_alloc_server_list(struct afs_cell *, struct key *, - struct afs_vldb_entry *, - u8); +struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume, + struct key *key, + struct afs_vldb_entry *vldb); extern bool afs_annotate_server_list(struct afs_server_list *, struct afs_server_list *); +void afs_attach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *slist); +void afs_reattach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *slist, + struct afs_server_list *old); +void afs_detach_volume_from_servers(struct afs_volume *volume, struct afs_server_list *slist); /* * super.c diff --git a/fs/afs/server.c b/fs/afs/server.c index 0bd2f5ba6900..87381c2ffe37 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c @@ -236,6 +236,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell, server->addr_version = alist->version; server->uuid = *uuid; rwlock_init(&server->fs_lock); + INIT_LIST_HEAD(&server->volumes); INIT_WORK(&server->initcb_work, afs_server_init_callback_work); init_waitqueue_head(&server->probe_wq); INIT_LIST_HEAD(&server->probe_link); diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c index b59896b1de0a..89c75d934f79 100644 --- a/fs/afs/server_list.c +++ b/fs/afs/server_list.c @@ -24,13 +24,13 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist) /* * Build a server list from a VLDB record. */ -struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, +struct afs_server_list *afs_alloc_server_list(struct afs_volume *volume, struct key *key, - struct afs_vldb_entry *vldb, - u8 type_mask) + struct afs_vldb_entry *vldb) { struct afs_server_list *slist; struct afs_server *server; + unsigned int type_mask = 1 << volume->type; int ret = -ENOMEM, nr_servers = 0, i, j; for (i = 0; i < vldb->nr_servers; i++) @@ -44,15 +44,12 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, refcount_set(&slist->usage, 1); rwlock_init(&slist->lock); - for (i = 0; i < AFS_MAXTYPES; i++) - slist->vids[i] = vldb->vid[i]; - /* Make sure a records exists for each server in the list. 
*/ for (i = 0; i < vldb->nr_servers; i++) { if (!(vldb->fs_mask[i] & type_mask)) continue; - server = afs_lookup_server(cell, key, &vldb->fs_server[i], + server = afs_lookup_server(volume->cell, key, &vldb->fs_server[i], vldb->addr_version[i]); if (IS_ERR(server)) { ret = PTR_ERR(server); @@ -70,8 +67,8 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, break; if (j < slist->nr_servers) { if (slist->servers[j].server == server) { - afs_put_server(cell->net, server, - afs_server_trace_put_slist_isort); + afs_unuse_server(volume->cell->net, server, + afs_server_trace_put_slist_isort); continue; } @@ -81,6 +78,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, } slist->servers[j].server = server; + slist->servers[j].volume = volume; slist->nr_servers++; } @@ -92,7 +90,7 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell, return slist; error_2: - afs_put_serverlist(cell->net, slist); + afs_put_serverlist(volume->cell->net, slist); error: return ERR_PTR(ret); } @@ -127,3 +125,99 @@ changed: return true; } + +/* + * Attach a volume to the servers it is going to use. + */ +void afs_attach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *slist) +{ + struct afs_server_entry *se, *pe; + struct afs_server *server; + struct list_head *p; + unsigned int i; + + spin_lock(&volume->cell->vs_lock); + + for (i = 0; i < slist->nr_servers; i++) { + se = &slist->servers[i]; + server = se->server; + + list_for_each(p, &server->volumes) { + pe = list_entry(p, struct afs_server_entry, slink); + if (volume->vid <= pe->volume->vid) + break; + } + list_add_tail_rcu(&se->slink, p); + } + + slist->attached = true; + spin_unlock(&volume->cell->vs_lock); +} + +/* + * Reattach a volume to the servers it is going to use when server list is + * replaced. We try to switch the attachment points to avoid rewalking the + * lists. + */ +void afs_reattach_volume_to_servers(struct afs_volume *volume, struct afs_server_list *new, + struct afs_server_list *old) +{ + unsigned int n = 0, o = 0; + + spin_lock(&volume->cell->vs_lock); + + while (n < new->nr_servers || o < old->nr_servers) { + struct afs_server_entry *pn = n < new->nr_servers ? &new->servers[n] : NULL; + struct afs_server_entry *po = o < old->nr_servers ? &old->servers[o] : NULL; + struct afs_server_entry *s; + struct list_head *p; + int diff; + + if (pn && po && pn->server == po->server) { + list_replace_rcu(&po->slink, &pn->slink); + n++; + o++; + continue; + } + + if (pn && po) + diff = memcmp(&pn->server->uuid, &po->server->uuid, + sizeof(pn->server->uuid)); + else + diff = pn ? -1 : 1; + + if (diff < 0) { + list_for_each(p, &pn->server->volumes) { + s = list_entry(p, struct afs_server_entry, slink); + if (volume->vid <= s->volume->vid) + break; + } + list_add_tail_rcu(&pn->slink, p); + n++; + } else { + list_del_rcu(&po->slink); + o++; + } + } + + spin_unlock(&volume->cell->vs_lock); +} + +/* + * Detach a volume from the servers it has been using. 
+ */ +void afs_detach_volume_from_servers(struct afs_volume *volume, struct afs_server_list *slist) +{ + unsigned int i; + + if (!slist->attached) + return; + + spin_lock(&volume->cell->vs_lock); + + for (i = 0; i < slist->nr_servers; i++) + list_del_rcu(&slist->servers[i].slink); + + slist->attached = false; + spin_unlock(&volume->cell->vs_lock); +} diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c index f04a80e4f5c3..b2cc10df9530 100644 --- a/fs/afs/vl_alias.c +++ b/fs/afs/vl_alias.c @@ -126,7 +126,7 @@ static int afs_compare_volume_slists(const struct afs_volume *vol_a, lb = rcu_dereference(vol_b->servers); for (i = 0; i < AFS_MAXTYPES; i++) - if (la->vids[i] != lb->vids[i]) + if (vol_a->vids[i] != vol_b->vids[i]) return 0; while (a < la->nr_servers && b < lb->nr_servers) { @@ -302,6 +302,7 @@ static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key) static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key) { struct afs_cell *master; + size_t name_len; char *cell_name; cell_name = afs_vl_get_cell_name(cell, key); @@ -313,8 +314,11 @@ static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key) return 0; } - master = afs_lookup_cell(cell->net, cell_name, strlen(cell_name), - NULL, false); + name_len = strlen(cell_name); + if (!name_len || name_len > AFS_MAXCELLNAME) + master = ERR_PTR(-EOPNOTSUPP); + else + master = afs_lookup_cell(cell->net, cell_name, name_len, NULL, false); kfree(cell_name); if (IS_ERR(master)) return PTR_ERR(master); diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index 00fca3c66ba6..16653f2ffe4f 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c @@ -671,7 +671,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) return ret; namesz = ntohl(call->tmp); - if (namesz > AFS_MAXCELLNAME) + if (namesz > YFS_VL_MAXCELLNAME) return afs_protocol_error(call, afs_eproto_cellname_len); paddedsz = (namesz + 3) & ~3; call->count = namesz; diff --git a/fs/afs/volume.c b/fs/afs/volume.c index a146d70efa65..0f64b9758127 100644 --- a/fs/afs/volume.c +++ b/fs/afs/volume.c @@ -72,15 +72,11 @@ static void afs_remove_volume_from_cell(struct afs_volume *volume) */ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params, struct afs_vldb_entry *vldb, - unsigned long type_mask) + struct afs_server_list **_slist) { struct afs_server_list *slist; struct afs_volume *volume; - int ret = -ENOMEM, nr_servers = 0, i; - - for (i = 0; i < vldb->nr_servers; i++) - if (vldb->fs_mask[i] & type_mask) - nr_servers++; + int ret = -ENOMEM, i; volume = kzalloc(sizeof(struct afs_volume), GFP_KERNEL); if (!volume) @@ -99,13 +95,16 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params, rwlock_init(&volume->cb_v_break_lock); memcpy(volume->name, vldb->name, vldb->name_len + 1); - slist = afs_alloc_server_list(params->cell, params->key, vldb, type_mask); + for (i = 0; i < AFS_MAXTYPES; i++) + volume->vids[i] = vldb->vid[i]; + + slist = afs_alloc_server_list(volume, params->key, vldb); if (IS_ERR(slist)) { ret = PTR_ERR(slist); goto error_1; } - refcount_set(&slist->usage, 1); + *_slist = slist; rcu_assign_pointer(volume->servers, slist); trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc); return volume; @@ -121,17 +120,19 @@ error_0: * Look up or allocate a volume record. 
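afs_attach_volume_to_servers() above inserts each afs_server_entry into the server's list in ascending volume-ID order before the first entry with a greater-or-equal vid. A compact, non-RCU model of that ordered insertion follows, using a plain singly linked list and illustrative types only.

#include <stdio.h>

struct vol {
    unsigned long long vid;
    struct vol *next;
};

/* Insert before the first entry whose vid is >= ours, keeping the list
 * sorted, analogous to the list_add_tail_rcu() call in the patch. */
static void insert_sorted(struct vol **head, struct vol *v)
{
    struct vol **p = head;

    while (*p && (*p)->vid < v->vid)
        p = &(*p)->next;
    v->next = *p;
    *p = v;
}

int main(void)
{
    struct vol a = { 20, 0 }, b = { 5, 0 }, c = { 11, 0 };
    struct vol *head = 0;

    insert_sorted(&head, &a);
    insert_sorted(&head, &b);
    insert_sorted(&head, &c);

    for (struct vol *v = head; v; v = v->next)
        printf("%llu ", v->vid);            /* prints: 5 11 20 */
    printf("\n");
    return 0;
}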
*/ static struct afs_volume *afs_lookup_volume(struct afs_fs_context *params, - struct afs_vldb_entry *vldb, - unsigned long type_mask) + struct afs_vldb_entry *vldb) { + struct afs_server_list *slist; struct afs_volume *candidate, *volume; - candidate = afs_alloc_volume(params, vldb, type_mask); + candidate = afs_alloc_volume(params, vldb, &slist); if (IS_ERR(candidate)) return candidate; volume = afs_insert_volume_into_cell(params->cell, candidate); - if (volume != candidate) + if (volume == candidate) + afs_attach_volume_to_servers(volume, slist); + else afs_put_volume(params->net, candidate, afs_volume_trace_put_cell_dup); return volume; } @@ -212,8 +213,7 @@ struct afs_volume *afs_create_volume(struct afs_fs_context *params) goto error; } - type_mask = 1UL << params->type; - volume = afs_lookup_volume(params, vldb, type_mask); + volume = afs_lookup_volume(params, vldb); error: kfree(vldb); @@ -225,14 +225,17 @@ error: */ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume) { + struct afs_server_list *slist = rcu_access_pointer(volume->servers); + _enter("%p", volume); #ifdef CONFIG_AFS_FSCACHE ASSERTCMP(volume->cache, ==, NULL); #endif + afs_detach_volume_from_servers(volume, slist); afs_remove_volume_from_cell(volume); - afs_put_serverlist(net, rcu_access_pointer(volume->servers)); + afs_put_serverlist(net, slist); afs_put_cell(volume->cell, afs_cell_trace_put_vol); trace_afs_volume(volume->vid, refcount_read(&volume->ref), afs_volume_trace_free); @@ -366,8 +369,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key) } /* See if the volume's server list got updated. */ - new = afs_alloc_server_list(volume->cell, key, - vldb, (1 << volume->type)); + new = afs_alloc_server_list(volume, key, vldb); if (IS_ERR(new)) { ret = PTR_ERR(new); goto error_vldb; @@ -388,9 +390,11 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key) volume->update_at = ktime_get_real_seconds() + afs_volume_record_life; write_unlock(&volume->servers_lock); - ret = 0; + if (discard == old) + afs_reattach_volume_to_servers(volume, new, old); afs_put_serverlist(volume->cell->net, discard); + ret = 0; error_vldb: kfree(vldb); error: diff --git a/fs/afs/xdr_fs.h b/fs/afs/xdr_fs.h index 8ca868164507..cc5f143d21a3 100644 --- a/fs/afs/xdr_fs.h +++ b/fs/afs/xdr_fs.h @@ -88,7 +88,7 @@ union afs_xdr_dir_block { struct { struct afs_xdr_dir_hdr hdr; - u8 alloc_ctrs[AFS_DIR_MAX_BLOCKS]; + u8 alloc_ctrs[AFS_DIR_BLOCKS_WITH_CTR]; __be16 hashtable[AFS_DIR_HASHTBL_SIZE]; } meta; diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 11571cca86c1..01f333e691d6 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -655,8 +655,9 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call) static void yfs_done_fs_remove_file2(struct afs_call *call) { if (call->error == -ECONNABORTED && - call->abort_code == RX_INVALID_OPERATION) { - set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags); + (call->abort_code == RX_INVALID_OPERATION || + call->abort_code == RXGEN_OPCODE)) { + set_bit(AFS_SERVER_FL_NO_RM2, &call->op->server->flags); call->op->flags |= AFS_OPERATION_DOWNGRADE; } } diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 89e7e4826efc..762704eed9ce 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -109,25 +109,6 @@ static struct linux_binfmt elf_format = { #define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE)) -static int set_brk(unsigned long start, unsigned long end, int prot) -{ - start = ELF_PAGEALIGN(start); - end = 
ELF_PAGEALIGN(end); - if (end > start) { - /* - * Map the last of the bss segment. - * If the header is requesting these pages to be - * executable, honour that (ppc32 needs this). - */ - int error = vm_brk_flags(start, end - start, - prot & PROT_EXEC ? VM_EXEC : 0); - if (error) - return error; - } - current->mm->start_brk = current->mm->brk = end; - return 0; -} - /* We need to explicitly zero any fractional pages after the data section (i.e. bss). This would contain the junk from the file that should not @@ -248,7 +229,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, } while (0) #ifdef ARCH_DLINFO - /* + /* * ARCH_DLINFO must come first so PPC can do its special alignment of * AUXV. * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in @@ -401,6 +382,51 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, return(map_addr); } +static unsigned long elf_load(struct file *filep, unsigned long addr, + const struct elf_phdr *eppnt, int prot, int type, + unsigned long total_size) +{ + unsigned long zero_start, zero_end; + unsigned long map_addr; + + if (eppnt->p_filesz) { + map_addr = elf_map(filep, addr, eppnt, prot, type, total_size); + if (BAD_ADDR(map_addr)) + return map_addr; + if (eppnt->p_memsz > eppnt->p_filesz) { + zero_start = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) + + eppnt->p_filesz; + zero_end = map_addr + ELF_PAGEOFFSET(eppnt->p_vaddr) + + eppnt->p_memsz; + + /* Zero the end of the last mapped page */ + padzero(zero_start); + } + } else { + map_addr = zero_start = ELF_PAGESTART(addr); + zero_end = zero_start + ELF_PAGEOFFSET(eppnt->p_vaddr) + + eppnt->p_memsz; + } + if (eppnt->p_memsz > eppnt->p_filesz) { + /* + * Map the last of the segment. + * If the header is requesting these pages to be + * executable, honour that (ppc32 needs this). + */ + int error; + + zero_start = ELF_PAGEALIGN(zero_start); + zero_end = ELF_PAGEALIGN(zero_end); + + error = vm_brk_flags(zero_start, zero_end - zero_start, + prot & PROT_EXEC ? VM_EXEC : 0); + if (error) + map_addr = error; + } + return map_addr; +} + + static unsigned long total_mapping_size(const struct elf_phdr *phdr, int nr) { elf_addr_t min_addr = -1; @@ -829,8 +855,8 @@ static int load_elf_binary(struct linux_binprm *bprm) unsigned long error; struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL; struct elf_phdr *elf_property_phdata = NULL; - unsigned long elf_bss, elf_brk; - int bss_prot = 0; + unsigned long elf_brk; + bool brk_moved = false; int retval, i; unsigned long elf_entry; unsigned long e_entry; @@ -1021,8 +1047,7 @@ out_free_interp: executable_stack); if (retval < 0) goto out_free_dentry; - - elf_bss = 0; + elf_brk = 0; start_code = ~0UL; @@ -1042,33 +1067,6 @@ out_free_interp: if (elf_ppnt->p_type != PT_LOAD) continue; - if (unlikely (elf_brk > elf_bss)) { - unsigned long nbyte; - - /* There was a PT_LOAD segment with p_memsz > p_filesz - before this one. Map anonymous pages, if needed, - and clear the area. */ - retval = set_brk(elf_bss + load_bias, - elf_brk + load_bias, - bss_prot); - if (retval) - goto out_free_dentry; - nbyte = ELF_PAGEOFFSET(elf_bss); - if (nbyte) { - nbyte = ELF_MIN_ALIGN - nbyte; - if (nbyte > elf_brk - elf_bss) - nbyte = elf_brk - elf_bss; - if (clear_user((void __user *)elf_bss + - load_bias, nbyte)) { - /* - * This bss-zeroing can fail if the ELF - * file specifies odd protections. 
So - * we don't check the return value - */ - } - } - } - elf_prot = make_prot(elf_ppnt->p_flags, &arch_state, !!interpreter, false); @@ -1096,47 +1094,7 @@ out_free_interp: * Header for ET_DYN binaries to calculate the * randomization (load_bias) for all the LOAD * Program Headers. - * - * There are effectively two types of ET_DYN - * binaries: programs (i.e. PIE: ET_DYN with INTERP) - * and loaders (ET_DYN without INTERP, since they - * _are_ the ELF interpreter). The loaders must - * be loaded away from programs since the program - * may otherwise collide with the loader (especially - * for ET_EXEC which does not have a randomized - * position). For example to handle invocations of - * "./ld.so someprog" to test out a new version of - * the loader, the subsequent program that the - * loader loads must avoid the loader itself, so - * they cannot share the same load range. Sufficient - * room for the brk must be allocated with the - * loader as well, since brk must be available with - * the loader. - * - * Therefore, programs are loaded offset from - * ELF_ET_DYN_BASE and loaders are loaded into the - * independently randomized mmap region (0 load_bias - * without MAP_FIXED nor MAP_FIXED_NOREPLACE). */ - if (interpreter) { - load_bias = ELF_ET_DYN_BASE; - if (current->flags & PF_RANDOMIZE) - load_bias += arch_mmap_rnd(); - alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum); - if (alignment) - load_bias &= ~(alignment - 1); - elf_flags |= MAP_FIXED_NOREPLACE; - } else - load_bias = 0; - - /* - * Since load_bias is used for all subsequent loading - * calculations, we must lower it by the first vaddr - * so that the remaining calculations based on the - * ELF vaddrs will be correctly offset. The result - * is then page aligned. - */ - load_bias = ELF_PAGESTART(load_bias - vaddr); /* * Calculate the entire size of the ELF mapping @@ -1162,9 +1120,90 @@ out_free_interp: retval = -EINVAL; goto out_free_dentry; } + + /* Calculate any requested alignment. */ + alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum); + + /** + * DOC: PIE handling + * + * There are effectively two types of ET_DYN ELF + * binaries: programs (i.e. PIE: ET_DYN with + * PT_INTERP) and loaders (i.e. static PIE: ET_DYN + * without PT_INTERP, usually the ELF interpreter + * itself). Loaders must be loaded away from programs + * since the program may otherwise collide with the + * loader (especially for ET_EXEC which does not have + * a randomized position). + * + * For example, to handle invocations of + * "./ld.so someprog" to test out a new version of + * the loader, the subsequent program that the + * loader loads must avoid the loader itself, so + * they cannot share the same load range. Sufficient + * room for the brk must be allocated with the + * loader as well, since brk must be available with + * the loader. + * + * Therefore, programs are loaded offset from + * ELF_ET_DYN_BASE and loaders are loaded into the + * independently randomized mmap region (0 load_bias + * without MAP_FIXED nor MAP_FIXED_NOREPLACE). + * + * See below for "brk" handling details, which is + * also affected by program vs loader and ASLR. + */ + if (interpreter) { + /* On ET_DYN with PT_INTERP, we do the ASLR. */ + load_bias = ELF_ET_DYN_BASE; + if (current->flags & PF_RANDOMIZE) + load_bias += arch_mmap_rnd(); + /* Adjust alignment as requested. 
*/ + if (alignment) + load_bias &= ~(alignment - 1); + elf_flags |= MAP_FIXED_NOREPLACE; + } else { + /* + * For ET_DYN without PT_INTERP, we rely on + * the architectures's (potentially ASLR) mmap + * base address (via a load_bias of 0). + * + * When a large alignment is requested, we + * must do the allocation at address "0" right + * now to discover where things will load so + * that we can adjust the resulting alignment. + * In this case (load_bias != 0), we can use + * MAP_FIXED_NOREPLACE to make sure the mapping + * doesn't collide with anything. + */ + if (alignment > ELF_MIN_ALIGN) { + load_bias = elf_load(bprm->file, 0, elf_ppnt, + elf_prot, elf_flags, total_size); + if (BAD_ADDR(load_bias)) { + retval = IS_ERR_VALUE(load_bias) ? + PTR_ERR((void*)load_bias) : -EINVAL; + goto out_free_dentry; + } + vm_munmap(load_bias, total_size); + /* Adjust alignment as requested. */ + if (alignment) + load_bias &= ~(alignment - 1); + elf_flags |= MAP_FIXED_NOREPLACE; + } else + load_bias = 0; + } + + /* + * Since load_bias is used for all subsequent loading + * calculations, we must lower it by the first vaddr + * so that the remaining calculations based on the + * ELF vaddrs will be correctly offset. The result + * is then page aligned. + */ + load_bias = ELF_PAGESTART(load_bias - vaddr); } - error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, + error = elf_load(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, total_size); if (BAD_ADDR(error)) { retval = IS_ERR((void *)error) ? @@ -1212,41 +1251,23 @@ out_free_interp: k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; - if (k > elf_bss) - elf_bss = k; if ((elf_ppnt->p_flags & PF_X) && end_code < k) end_code = k; if (end_data < k) end_data = k; k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; - if (k > elf_brk) { - bss_prot = elf_prot; + if (k > elf_brk) elf_brk = k; - } } e_entry = elf_ex->e_entry + load_bias; phdr_addr += load_bias; - elf_bss += load_bias; elf_brk += load_bias; start_code += load_bias; end_code += load_bias; start_data += load_bias; end_data += load_bias; - /* Calling set_brk effectively mmaps the pages that we need - * for the bss and break sections. We must do this before - * mapping in the interpreter, to make sure it doesn't wind - * up getting placed where the bss needs to go. - */ - retval = set_brk(elf_bss, elf_brk, bss_prot); - if (retval) - goto out_free_dentry; - if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { - retval = -EFAULT; /* Nobody gets to see this, but.. */ - goto out_free_dentry; - } - if (interpreter) { elf_entry = load_elf_interp(interp_elf_ex, interpreter, @@ -1302,24 +1323,44 @@ out_free_interp: mm->end_data = end_data; mm->start_stack = bprm->p; - if ((current->flags & PF_RANDOMIZE) && (snapshot_randomize_va_space > 1)) { + /** + * DOC: "brk" handling + * + * For architectures with ELF randomization, when executing a + * loader directly (i.e. static PIE: ET_DYN without PT_INTERP), + * move the brk area out of the mmap region and into the unused + * ELF_ET_DYN_BASE region. Since "brk" grows up it may collide + * early with the stack growing down or other regions being put + * into the mmap region by the kernel (e.g. vdso). + * + * In the CONFIG_COMPAT_BRK case, though, everything is turned + * off because we're not allowed to move the brk at all. + */ + if (!IS_ENABLED(CONFIG_COMPAT_BRK) && + IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && + elf_ex->e_type == ET_DYN && !interpreter) { + elf_brk = ELF_ET_DYN_BASE; + /* This counts as moving the brk, so let brk(2) know. 
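The PIE-handling code above repeatedly rounds load_bias down with "&= ~(alignment - 1)" and then re-bases it with ELF_PAGESTART(load_bias - vaddr). A tiny userspace model of that arithmetic follows; the page size, alignment and vaddr values are assumptions chosen only to make the rounding visible.

#include <stdint.h>
#include <stdio.h>

#define ELF_MIN_ALIGN    4096ull                        /* assumed page size */
#define ELF_PAGESTART(x) ((x) & ~(ELF_MIN_ALIGN - 1))

int main(void)
{
    uint64_t load_bias = 0x555555554aa000ull;  /* base + random offset */
    uint64_t alignment = 0x200000ull;          /* segment asks for 2 MiB */
    uint64_t vaddr     = 0x1000ull;            /* first PT_LOAD p_vaddr */

    /* Round the chosen base down to the requested alignment... */
    if (alignment)
        load_bias &= ~(alignment - 1);

    /* ...then lower it by the first vaddr and page-align the result, so
     * that load_bias + p_vaddr lands at the intended mapping address. */
    load_bias = ELF_PAGESTART(load_bias - vaddr);

    printf("load_bias = %#llx\n", (unsigned long long)load_bias);
    printf("first segment maps at %#llx\n",
           (unsigned long long)(load_bias + ELF_PAGESTART(vaddr)));
    return 0;
}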
*/ + brk_moved = true; + } + mm->start_brk = mm->brk = ELF_PAGEALIGN(elf_brk); + + if ((current->flags & PF_RANDOMIZE) && snapshot_randomize_va_space > 1) { /* - * For architectures with ELF randomization, when executing - * a loader directly (i.e. no interpreter listed in ELF - * headers), move the brk area out of the mmap region - * (since it grows up, and may collide early with the stack - * growing down), and into the unused ELF_ET_DYN_BASE region. + * If we didn't move the brk to ELF_ET_DYN_BASE (above), + * leave a gap between .bss and brk. */ - if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && - elf_ex->e_type == ET_DYN && !interpreter) { - mm->brk = mm->start_brk = ELF_ET_DYN_BASE; - } + if (!brk_moved) + mm->brk = mm->start_brk = mm->brk + PAGE_SIZE; mm->brk = mm->start_brk = arch_randomize_brk(mm); + brk_moved = true; + } + #ifdef compat_brk_randomized + if (brk_moved) current->brk_randomized = 1; #endif - } if (current->personality & MMAP_PAGE_ZERO) { /* Why this, you ask??? Well SVr4 maps page 0 as read-only, @@ -1522,7 +1563,7 @@ static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset) phdr->p_align = 0; } -static void fill_note(struct memelfnote *note, const char *name, int type, +static void fill_note(struct memelfnote *note, const char *name, int type, unsigned int sz, void *data) { note->name = name; @@ -2005,8 +2046,8 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t) t->num_notes = 0; fill_prstatus(&t->prstatus.common, p, signr); - elf_core_copy_task_regs(p, &t->prstatus.pr_reg); - + elf_core_copy_task_regs(p, &t->prstatus.pr_reg); + fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &(t->prstatus)); t->num_notes++; @@ -2296,7 +2337,7 @@ static int elf_core_dump(struct coredump_params *cprm) if (!elf_core_write_extra_phdrs(cprm, offset)) goto end_coredump; - /* write out the notes section */ + /* write out the notes section */ if (!write_note_info(&info, cprm)) goto end_coredump; diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index c71a40927315..b2d3b6e43bb5 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1603,7 +1603,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) if (!elf_core_write_extra_phdrs(cprm, offset)) goto end_coredump; - /* write out the notes section */ + /* write out the notes section */ if (!writenote(thread_list->notes, cprm)) goto end_coredump; if (!writenote(&psinfo_note, cprm)) diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index cd6d5bbb4b9d..3f740d8abb4f 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -478,7 +478,7 @@ static int load_flat_file(struct linux_binprm *bprm, * 28 bits (256 MB) is way more than reasonable in this case. * If some top bits are set we have probable binary corruption. */ - if ((text_len | data_len | bss_len | stack_len | full_data) >> 28) { + if ((text_len | data_len | bss_len | stack_len | relocs | full_data) >> 28) { pr_err("bad header\n"); ret = -ENOEXEC; goto err; diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 0dcf7fecaf55..91440ef79a26 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1678,6 +1678,17 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) up_write(&space_info->groups_sem); goto next; } + + /* + * Cache the zone_unusable value before turning the block group + * to read only. As soon as the block group is read only it's + * zone_unusable value gets moved to the block group's read-only + * bytes and isn't available for calculations anymore. 
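The binfmt_flat check above ORs the header fields (now including relocs) and shifts the result right by 28: a bit survives the OR iff it is set in at least one field, so a non-zero result means some field exceeds 28 bits (256 MB). A minimal illustration with made-up header values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t text_len = 0x00010000;
    uint32_t data_len = 0x00002000;
    uint32_t relocs   = 0x40000001;   /* corrupt: bit 30 set, far over 256 MB */

    /* One branch instead of one range check per field. */
    if ((text_len | data_len | relocs) >> 28)
        printf("bad header\n");
    return 0;
}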
We also + * cache it before unlocking the block group, to prevent races + * (reports from KCSAN and such tools) with tasks updating it. + */ + zone_unusable = bg->zone_unusable; + spin_unlock(&bg->lock); /* @@ -1693,13 +1704,6 @@ void btrfs_reclaim_bgs_work(struct work_struct *work) goto next; } - /* - * Cache the zone_unusable value before turning the block group - * to read only. As soon as the blog group is read only it's - * zone_unusable value gets moved to the block group's read-only - * bytes and isn't available for calculations anymore. - */ - zone_unusable = bg->zone_unusable; ret = inc_block_group_ro(bg, 0); up_write(&space_info->groups_sem); if (ret < 0) diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 66d1f34c3fc6..c7171b286de7 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -78,6 +78,8 @@ size_t __attribute_const__ btrfs_get_num_csums(void) struct btrfs_path *btrfs_alloc_path(void) { + might_sleep(); + return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); } @@ -402,13 +404,13 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, * bytes the allocator should try to find free next to the block it returns. * This is just a hint and may be ignored by the allocator. */ -static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, - struct btrfs_root *root, - struct extent_buffer *buf, - struct extent_buffer *parent, int parent_slot, - struct extent_buffer **cow_ret, - u64 search_start, u64 empty_size, - enum btrfs_lock_nesting nest) +int btrfs_force_cow_block(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct extent_buffer *buf, + struct extent_buffer *parent, int parent_slot, + struct extent_buffer **cow_ret, + u64 search_start, u64 empty_size, + enum btrfs_lock_nesting nest) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_disk_key disk_key; @@ -518,6 +520,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, btrfs_free_tree_block(trans, btrfs_root_id(root), buf, parent_start, last_ref); } + + trace_btrfs_cow_block(root, buf, cow); if (unlock_orig) btrfs_tree_unlock(buf); free_extent_buffer_stale(buf); @@ -557,7 +561,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans, } /* - * cows a single block, see __btrfs_cow_block for the real work. + * COWs a single block, see btrfs_force_cow_block() for the real work. * This version of it has extra checks so that a block isn't COWed more than * once per transaction, as long as it hasn't been written yet */ @@ -569,7 +573,6 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, { struct btrfs_fs_info *fs_info = root->fs_info; u64 search_start; - int ret; if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) { btrfs_abort_transaction(trans, -EUCLEAN); @@ -610,12 +613,8 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans, * Also We don't care about the error, as it's handled internally. 
*/ btrfs_qgroup_trace_subtree_after_cow(trans, root, buf); - ret = __btrfs_cow_block(trans, root, buf, parent, - parent_slot, cow_ret, search_start, 0, nest); - - trace_btrfs_cow_block(root, buf, *cow_ret); - - return ret; + return btrfs_force_cow_block(trans, root, buf, parent, parent_slot, + cow_ret, search_start, 0, nest); } ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO); @@ -762,11 +761,11 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, search_start = last_block; btrfs_tree_lock(cur); - err = __btrfs_cow_block(trans, root, cur, parent, i, - &cur, search_start, - min(16 * blocksize, - (end_slot - i) * blocksize), - BTRFS_NESTING_COW); + err = btrfs_force_cow_block(trans, root, cur, parent, i, + &cur, search_start, + min(16 * blocksize, + (end_slot - i) * blocksize), + BTRFS_NESTING_COW); if (err) { btrfs_tree_unlock(cur); free_extent_buffer(cur); @@ -1971,7 +1970,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, const struct btrfs_key *key, struct btrfs_path *p, int ins_len, int cow) { - struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_fs_info *fs_info; struct extent_buffer *b; int slot; int ret; @@ -1984,6 +1983,12 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root, int min_write_lock_level; int prev_cmp; + if (!root) + return -EINVAL; + + fs_info = root->fs_info; + might_sleep(); + lowest_level = p->lowest_level; WARN_ON(lowest_level && ins_len > 0); WARN_ON(p->nodes[0] != NULL); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index cca1acf2e037..da8986e0c422 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2987,8 +2987,6 @@ enum btrfs_flush_state { int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, struct btrfs_block_rsv *rsv, int nitems, bool use_global_rsv); -void btrfs_subvolume_release_metadata(struct btrfs_root *root, - struct btrfs_block_rsv *rsv); void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes); int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes, @@ -3034,6 +3032,13 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans, struct extent_buffer *parent, int parent_slot, struct extent_buffer **cow_ret, enum btrfs_lock_nesting nest); +int btrfs_force_cow_block(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct extent_buffer *buf, + struct extent_buffer *parent, int parent_slot, + struct extent_buffer **cow_ret, + u64 search_start, u64 empty_size, + enum btrfs_lock_nesting nest); int btrfs_copy_root(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf, diff --git a/fs/btrfs/discard.c b/fs/btrfs/discard.c index bd9dde374e5d..a90f3cb83c70 100644 --- a/fs/btrfs/discard.c +++ b/fs/btrfs/discard.c @@ -78,8 +78,6 @@ static void __add_to_discard_list(struct btrfs_discard_ctl *discard_ctl, struct btrfs_block_group *block_group) { lockdep_assert_held(&discard_ctl->lock); - if (!btrfs_run_discard_work(discard_ctl)) - return; if (list_empty(&block_group->discard_list) || block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED) { @@ -102,6 +100,9 @@ static void add_to_discard_list(struct btrfs_discard_ctl *discard_ctl, if (!btrfs_is_block_group_data_only(block_group)) return; + if (!btrfs_run_discard_work(discard_ctl)) + return; + spin_lock(&discard_ctl->lock); __add_to_discard_list(discard_ctl, block_group); spin_unlock(&discard_ctl->lock); @@ -151,13 +152,7 @@ static bool remove_from_discard_list(struct btrfs_discard_ctl *discard_ctl, block_group->discard_eligible_time = 0; 
queued = !list_empty(&block_group->discard_list); list_del_init(&block_group->discard_list); - /* - * If the block group is currently running in the discard workfn, we - * don't want to deref it, since it's still being used by the workfn. - * The workfn will notice this case and deref the block group when it is - * finished. - */ - if (queued && !running) + if (queued) btrfs_put_block_group(block_group); spin_unlock(&discard_ctl->lock); @@ -233,6 +228,18 @@ again: block_group->used != 0) { if (btrfs_is_block_group_data_only(block_group)) { __add_to_discard_list(discard_ctl, block_group); + /* + * The block group must have been moved to other + * discard list even if discard was disabled in + * the meantime or a transaction abort happened, + * otherwise we can end up in an infinite loop, + * always jumping into the 'again' label and + * keep getting this block group over and over + * in case there are no other block groups in + * the discard lists. + */ + ASSERT(block_group->discard_index != + BTRFS_DISCARD_INDEX_UNUSED); } else { list_del_init(&block_group->discard_list); btrfs_put_block_group(block_group); @@ -243,9 +250,10 @@ again: block_group->discard_cursor = block_group->start; block_group->discard_state = BTRFS_DISCARD_EXTENTS; } - discard_ctl->block_group = block_group; } if (block_group) { + btrfs_get_block_group(block_group); + discard_ctl->block_group = block_group; *discard_state = block_group->discard_state; *discard_index = block_group->discard_index; } @@ -469,9 +477,20 @@ static void btrfs_discard_workfn(struct work_struct *work) block_group = peek_discard_list(discard_ctl, &discard_state, &discard_index, now); - if (!block_group || !btrfs_run_discard_work(discard_ctl)) + if (!block_group) return; + if (!btrfs_run_discard_work(discard_ctl)) { + spin_lock(&discard_ctl->lock); + btrfs_put_block_group(block_group); + discard_ctl->block_group = NULL; + spin_unlock(&discard_ctl->lock); + return; + } if (now < block_group->discard_eligible_time) { + spin_lock(&discard_ctl->lock); + btrfs_put_block_group(block_group); + discard_ctl->block_group = NULL; + spin_unlock(&discard_ctl->lock); btrfs_discard_schedule_work(discard_ctl, false); return; } @@ -523,15 +542,7 @@ static void btrfs_discard_workfn(struct work_struct *work) spin_lock(&discard_ctl->lock); discard_ctl->prev_discard = trimmed; discard_ctl->prev_discard_time = now; - /* - * If the block group was removed from the discard list while it was - * running in this workfn, then we didn't deref it, since this function - * still owned that reference. But we set the discard_ctl->block_group - * back to NULL, so we can use that condition to know that now we need - * to deref the block_group. - */ - if (discard_ctl->block_group == NULL) - btrfs_put_block_group(block_group); + btrfs_put_block_group(block_group); discard_ctl->block_group = NULL; __btrfs_discard_schedule_work(discard_ctl, now, false); spin_unlock(&discard_ctl->lock); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index b7ec38f7e4b9..8c0da0025bc7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -4641,6 +4641,14 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) /* clear out the rbtree of defraggable inodes */ btrfs_cleanup_defrag_inodes(fs_info); + /* + * Handle the error fs first, as it will flush and wait for all ordered + * extents. This will generate delayed iputs, thus we want to handle + * it first. + */ + if (unlikely(BTRFS_FS_ERROR(fs_info))) + btrfs_error_commit_super(fs_info); + /* * Wait for any fixup workers to complete. 
* If we don't wait for them here and they are still running by the time + * already the cleaner, but below we run all pending delayed iputs. + */ + btrfs_flush_workqueue(fs_info->fixup_workers); + /* + * Similar case here, we have to wait for delalloc workers before we + * proceed below and stop the cleaner kthread, otherwise we trigger a + * use-after-free on the cleaner kthread task_struct when a delalloc + * worker running submit_compressed_extents() adds a delayed iput, which + * does a wake up on the cleaner kthread, which was already freed below + * when we call kthread_stop(). + */ + btrfs_flush_workqueue(fs_info->delalloc_workers); + + /* + * We can have ordered extents getting their last reference dropped from + * the fs_info->workers queue because for async writes for data bios we + * queue a work for that queue, at btrfs_wq_submit_bio(), that runs + * run_one_async_done() which calls btrfs_bio_end_io() in case the bio + * has an error, and that later function can do the final + * btrfs_put_ordered_extent() on the ordered extent attached to the bio, + * which adds a delayed iput for the inode. So we must flush the queue + * so that we don't have delayed iputs after committing the current + * transaction below and stopping the cleaner and transaction kthreads. + */ + btrfs_flush_workqueue(fs_info->workers); + + /* + * When finishing a compressed write bio we schedule a work queue item + * to finish an ordered extent - btrfs_finish_compressed_write_work() + * calls btrfs_finish_ordered_extent() which in turns does a call to + * btrfs_queue_ordered_fn(), and that queues the ordered extent + * completion either in the endio_write_workers work queue or in the + * fs_info->endio_freespace_worker work queue. We flush those queues + * below, so before we flush them we must flush this queue for the + * workers of compressed writes. 
+ */ + flush_workqueue(fs_info->compressed_write_workers); /* * After we parked the cleaner kthread, ordered extents may have @@ -4709,9 +4751,6 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info) btrfs_err(fs_info, "commit super ret %d", ret); } - if (BTRFS_FS_ERROR(fs_info)) - btrfs_error_commit_super(fs_info); - kthread_stop(fs_info->transaction_kthread); kthread_stop(fs_info->cleaner_kthread); @@ -4867,10 +4906,6 @@ static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) /* cleanup FS via transaction */ btrfs_cleanup_transaction(fs_info); - mutex_lock(&fs_info->cleaner_mutex); - btrfs_run_delayed_iputs(fs_info); - mutex_unlock(&fs_info->cleaner_mutex); - down_write(&fs_info->cleanup_work_sem); up_write(&fs_info->cleanup_work_sem); } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0d97c8ee6b4f..5395e27f9e89 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -179,6 +179,14 @@ search_again: ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); num_refs = btrfs_extent_refs(leaf, ei); + if (unlikely(num_refs == 0)) { + ret = -EUCLEAN; + btrfs_err(fs_info, + "unexpected zero reference count for extent item (%llu %u %llu)", + key.objectid, key.type, key.offset); + btrfs_abort_transaction(trans, ret); + goto out_free; + } extent_flags = btrfs_extent_flags(leaf, ei); } else { ret = -EINVAL; @@ -190,8 +198,6 @@ search_again: goto out_free; } - - BUG_ON(num_refs == 0); } else { num_refs = 0; extent_flags = 0; @@ -221,10 +227,19 @@ search_again: goto search_again; } spin_lock(&head->lock); - if (head->extent_op && head->extent_op->update_flags) + if (head->extent_op && head->extent_op->update_flags) { extent_flags |= head->extent_op->flags_to_set; - else - BUG_ON(num_refs == 0); + } else if (unlikely(num_refs == 0)) { + spin_unlock(&head->lock); + mutex_unlock(&head->mutex); + spin_unlock(&delayed_refs->lock); + ret = -EUCLEAN; + btrfs_err(fs_info, + "unexpected zero reference count for extent %llu (%s)", + bytenr, metadata ? 
"metadata" : "data"); + btrfs_abort_transaction(trans, ret); + goto out_free; + } num_refs += head->ref_mod; spin_unlock(&head->lock); @@ -2372,7 +2387,7 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, goto out; ret = check_delayed_ref(root, path, objectid, offset, bytenr); - } while (ret == -EAGAIN); + } while (ret == -EAGAIN && !path->nowait); out: btrfs_release_path(path); @@ -5213,7 +5228,6 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans, eb->start, level, 1, &wc->refs[level], &wc->flags[level]); - BUG_ON(ret == -ENOMEM); if (ret) return ret; if (unlikely(wc->refs[level] == 0)) { @@ -5576,7 +5590,10 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans, ret = btrfs_dec_ref(trans, root, eb, 1); else ret = btrfs_dec_ref(trans, root, eb, 0); - BUG_ON(ret); /* -ENOMEM */ + if (ret) { + btrfs_abort_transaction(trans, ret); + return ret; + } if (is_fstree(root->root_key.objectid)) { ret = btrfs_qgroup_trace_leaf_items(trans, eb); if (ret) { diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 72227c0b4b5a..d5552875f872 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4459,10 +4459,10 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, return eb; } -#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, u64 start) { +#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct extent_buffer *eb, *exists = NULL; int ret; @@ -4498,8 +4498,11 @@ again: free_eb: btrfs_release_extent_buffer(eb); return exists; -} +#else + /* Stub to avoid linker error when compiled with optimizations turned off. */ + return NULL; #endif +} static struct extent_buffer *grab_extent_buffer( struct btrfs_fs_info *fs_info, struct page *page) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c8231677c79e..3814f09dc4ae 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -234,7 +234,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, if (args->drop_cache) btrfs_drop_extent_map_range(inode, args->start, args->end - 1, false); - if (args->start >= inode->disk_i_size && !args->replace_extent) + if (data_race(args->start >= inode->disk_i_size) && !args->replace_extent) modify_tree = 0; update_refs = (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID); @@ -1122,7 +1122,6 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, loff_t pos = iocb->ki_pos; int ret; loff_t oldsize; - loff_t start_pos; /* * Quickly bail out on NOWAIT writes if we don't have the nodatacow or @@ -1147,9 +1146,8 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, */ update_time_for_write(inode); - start_pos = round_down(pos, fs_info->sectorsize); oldsize = i_size_read(inode); - if (start_pos > oldsize) { + if (pos > oldsize) { /* Expand hole size to cover write data, preventing empty gap */ loff_t end_pos = round_up(pos + count, fs_info->sectorsize); @@ -2226,15 +2224,20 @@ static void btrfs_punch_hole_lock_range(struct inode *inode, * will always return true. * So here we need to do extra page alignment for * filemap_range_has_page(). + * + * And do not decrease page_lockend right now, as it can be 0. 
*/ const u64 page_lockstart = round_up(lockstart, PAGE_SIZE); - const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1; + const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE); while (1) { truncate_pagecache_range(inode, lockstart, lockend); lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, cached_state); + /* The same page or adjacent pages. */ + if (page_lockend <= page_lockstart) + break; /* * We can't have ordered extents in the range, nor dirty/writeback * pages, because we have locked the inode's VFS lock in exclusive @@ -2246,7 +2249,7 @@ static void btrfs_punch_hole_lock_range(struct inode *inode, * we do, unlock the range and retry. */ if (!filemap_range_has_page(inode->i_mapping, page_lockstart, - page_lockend)) + page_lockend - 1)) break; unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a79da940f5b2..2195e07803b8 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1428,6 +1428,7 @@ out_unlock: locked_page, clear_bits, page_ops); + btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL); start += cur_alloc_size; } @@ -1441,6 +1442,7 @@ out_unlock: clear_bits |= EXTENT_CLEAR_DATA_RESV; extent_clear_unlock_delalloc(inode, start, end, locked_page, clear_bits, page_ops); + btrfs_qgroup_free_data(inode, NULL, start, end - start + 1, NULL); } return ret; } @@ -2168,13 +2170,15 @@ error: if (nocow) btrfs_dec_nocow_writers(bg); - if (ret && cur_offset < end) + if (ret && cur_offset < end) { extent_clear_unlock_delalloc(inode, cur_offset, end, locked_page, EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK); + btrfs_qgroup_free_data(inode, NULL, cur_offset, end - cur_offset + 1, NULL); + } btrfs_free_path(path); return ret; } @@ -4707,6 +4711,7 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) struct btrfs_trans_handle *trans; struct btrfs_block_rsv block_rsv; u64 root_flags; + u64 qgroup_reserved = 0; int ret; down_write(&fs_info->subvol_sem); @@ -4751,12 +4756,20 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry) ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true); if (ret) goto out_undead; + qgroup_reserved = block_rsv.qgroup_rsv_reserved; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); goto out_release; } + ret = btrfs_record_root_in_trans(trans, root); + if (ret) { + btrfs_abort_transaction(trans, ret); + goto out_end_trans; + } + btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); + qgroup_reserved = 0; trans->block_rsv = &block_rsv; trans->bytes_reserved = block_rsv.size; @@ -4815,7 +4828,9 @@ out_end_trans: ret = btrfs_end_transaction(trans); inode->i_flags |= S_DEAD; out_release: - btrfs_subvolume_release_metadata(root, &block_rsv); + btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL); + if (qgroup_reserved) + btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); out_undead: if (ret) { spin_lock(&dest->root_item_lock); @@ -10456,7 +10471,7 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio) */ WRITE_ONCE(priv->status, status); } - if (!atomic_dec_return(&priv->pending)) + if (atomic_dec_and_test(&priv->pending)) wake_up(&priv->wait); btrfs_bio_free_csum(bbio); bio_put(&bbio->bio); @@ -11217,6 +11232,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, if (btrfs_root_dead(root)) { spin_unlock(&root->root_item_lock); + 
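btrfs_punch_hole_lock_range() above now keeps page_lockend un-decremented and skips the page-cache check when the rounded range collapses. A quick model of the rounding (PAGE_SIZE assumed 4 KiB, offsets made up) shows both the collapse case and the underflow the old "- 1" could produce when round_down() returns 0.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ull
#define round_up(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define round_down(x, a) (((x) / (a)) * (a))

int main(void)
{
    /* A hole that starts and ends inside the very first page. */
    uint64_t lockstart = 0x100, lockend = 0xeff;

    uint64_t page_lockstart = round_up(lockstart, PAGE_SIZE);       /* 0x1000 */
    uint64_t page_lockend   = round_down(lockend + 1, PAGE_SIZE);   /* 0x0    */

    if (page_lockend <= page_lockstart)
        printf("no full page covered, skip filemap_range_has_page()\n");

    /* The old code computed round_down(...) - 1 up front; with the values
     * above that underflows to 0xffffffffffffffff and checks everything. */
    printf("old end would have been %#llx\n",
           (unsigned long long)(page_lockend - 1));
    return 0;
}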
btrfs_drew_write_unlock(&root->snapshot_lock); btrfs_exclop_finish(fs_info); btrfs_warn(fs_info, "cannot activate swapfile because subvolume %llu is being deleted", @@ -11356,6 +11372,8 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file, } start += len; + + cond_resched(); } if (bsi.block_len) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 31f7fe31b607..a30379936af5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -592,6 +592,7 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, int ret; dev_t anon_dev; u64 objectid; + u64 qgroup_reserved = 0; root_item = kzalloc(sizeof(*root_item), GFP_KERNEL); if (!root_item) @@ -629,13 +630,18 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, trans_num_items, false); if (ret) goto out_new_inode_args; + qgroup_reserved = block_rsv.qgroup_rsv_reserved; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); - btrfs_subvolume_release_metadata(root, &block_rsv); - goto out_new_inode_args; + goto out_release_rsv; } + ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root); + if (ret) + goto out; + btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); + qgroup_reserved = 0; trans->block_rsv = &block_rsv; trans->bytes_reserved = block_rsv.size; @@ -744,12 +750,15 @@ static noinline int create_subvol(struct user_namespace *mnt_userns, out: trans->block_rsv = NULL; trans->bytes_reserved = 0; - btrfs_subvolume_release_metadata(root, &block_rsv); if (ret) btrfs_end_transaction(trans); else ret = btrfs_commit_transaction(trans); +out_release_rsv: + btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL); + if (qgroup_reserved) + btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); out_new_inode_args: btrfs_new_inode_args_destroy(&new_inode_args); out_inode: @@ -771,6 +780,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, struct btrfs_pending_snapshot *pending_snapshot; unsigned int trans_num_items; struct btrfs_trans_handle *trans; + struct btrfs_block_rsv *block_rsv; + u64 qgroup_reserved = 0; int ret; /* We do not support snapshotting right now. 
*/ @@ -807,19 +818,19 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, goto free_pending; } - btrfs_init_block_rsv(&pending_snapshot->block_rsv, - BTRFS_BLOCK_RSV_TEMP); + block_rsv = &pending_snapshot->block_rsv; + btrfs_init_block_rsv(block_rsv, BTRFS_BLOCK_RSV_TEMP); /* * 1 to add dir item * 1 to add dir index * 1 to update parent inode item */ trans_num_items = create_subvol_num_items(inherit) + 3; - ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, - &pending_snapshot->block_rsv, + ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root, block_rsv, trans_num_items, false); if (ret) goto free_pending; + qgroup_reserved = block_rsv->qgroup_rsv_reserved; pending_snapshot->dentry = dentry; pending_snapshot->root = root; @@ -832,6 +843,13 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, ret = PTR_ERR(trans); goto fail; } + ret = btrfs_record_root_in_trans(trans, BTRFS_I(dir)->root); + if (ret) { + btrfs_end_transaction(trans); + goto fail; + } + btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved); + qgroup_reserved = 0; trans->pending_snapshot = pending_snapshot; @@ -861,7 +879,9 @@ fail: if (ret && pending_snapshot->snap) pending_snapshot->snap->anon_dev = 0; btrfs_put_root(pending_snapshot->snap); - btrfs_subvolume_release_metadata(root, &pending_snapshot->block_rsv); + btrfs_block_rsv_release(fs_info, block_rsv, (u64)-1, NULL); + if (qgroup_reserved) + btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved); free_pending: if (pending_snapshot->anon_dev) free_anon_bdev(pending_snapshot->anon_dev); diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c index 8083fe866d2b..56ceb23bd740 100644 --- a/fs/btrfs/ref-verify.c +++ b/fs/btrfs/ref-verify.c @@ -846,6 +846,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info, "dropping a ref for a root that doesn't have a ref on the block"); dump_block_entry(fs_info, be); dump_ref_action(fs_info, ra); + rb_erase(&ref->node, &be->refs); kfree(ref); kfree(ra); goto out_unlock; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 4c6ba97299cd..fd6ea3fcab33 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2977,6 +2977,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, int ret; ASSERT(page_index <= last_index); +again: page = find_lock_page(inode->i_mapping, page_index); if (!page) { page_cache_sync_readahead(inode->i_mapping, ra, NULL, @@ -2998,6 +2999,11 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, ret = -EIO; goto release_page; } + if (page->mapping != inode->i_mapping) { + unlock_page(page); + put_page(page); + goto again; + } } /* @@ -4423,8 +4429,18 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans, WARN_ON(!first_cow && level == 0); node = rc->backref_cache.path[level]; - BUG_ON(node->bytenr != buf->start && - node->new_bytenr != buf->start); + + /* + * If node->bytenr != buf->start and node->new_bytenr != + * buf->start then we've got the wrong backref node for what we + * expected to see here and the cache is incorrect. 
+ */ + if (unlikely(node->bytenr != buf->start && node->new_bytenr != buf->start)) { + btrfs_err(fs_info, +"bytenr %llu was found but our backref cache was expecting %llu or %llu", + buf->start, node->bytenr, node->new_bytenr); + return -EUCLEAN; + } btrfs_backref_drop_node_buffer(node); atomic_inc(&cow->refs); diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 7d783f094306..37780ede89ba 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -532,13 +532,3 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, } return ret; } - -void btrfs_subvolume_release_metadata(struct btrfs_root *root, - struct btrfs_block_rsv *rsv) -{ - struct btrfs_fs_info *fs_info = root->fs_info; - u64 qgroup_to_release; - - btrfs_block_rsv_release(fs_info, rsv, (u64)-1, &qgroup_to_release); - btrfs_qgroup_convert_reserved_meta(root, qgroup_to_release); -} diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index a2b95ccb4cf5..0735decec99b 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -431,10 +431,8 @@ static int fs_path_ensure_buf(struct fs_path *p, int len) if (p->buf_len >= len) return 0; - if (len > PATH_MAX) { - WARN_ON(1); - return -ENOMEM; - } + if (WARN_ON(len > PATH_MAX)) + return -ENAMETOOLONG; path_len = p->end - p->start; old_buf_len = p->buf_len; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index d063379a031d..91b19d66449b 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1468,7 +1468,7 @@ static int btrfs_fill_super(struct super_block *sb, err = open_ctree(sb, fs_devices, (char *)data); if (err) { - btrfs_err(fs_info, "open_ctree failed"); + btrfs_err(fs_info, "open_ctree failed: %d", err); return err; } @@ -1640,8 +1640,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) subvol_name = btrfs_get_subvol_name_from_objectid(info, BTRFS_I(d_inode(dentry))->root->root_key.objectid); if (!IS_ERR(subvol_name)) { - seq_puts(seq, ",subvol="); - seq_escape(seq, subvol_name, " \t\n\\"); + seq_show_option(seq, "subvol", subvol_name); kfree(subvol_name); } return 0; diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index fc468d1079c2..44a94ac21e2f 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -971,7 +971,7 @@ static ssize_t btrfs_nodesize_show(struct kobject *kobj, { struct btrfs_fs_info *fs_info = to_fs_info(kobj); - return sysfs_emit(buf, "%u\n", fs_info->super_copy->nodesize); + return sysfs_emit(buf, "%u\n", fs_info->nodesize); } BTRFS_ATTR(, nodesize, btrfs_nodesize_show); @@ -981,7 +981,7 @@ static ssize_t btrfs_sectorsize_show(struct kobject *kobj, { struct btrfs_fs_info *fs_info = to_fs_info(kobj); - return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize); + return sysfs_emit(buf, "%u\n", fs_info->sectorsize); } BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show); @@ -1033,7 +1033,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj, { struct btrfs_fs_info *fs_info = to_fs_info(kobj); - return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize); + return sysfs_emit(buf, "%u\n", fs_info->sectorsize); } BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show); diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 604241e6e2c1..ff3e0d4cf4b4 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -262,8 +262,10 @@ loop: cur_trans = fs_info->running_transaction; if (cur_trans) { if (TRANS_ABORTED(cur_trans)) { + const int abort_error = cur_trans->aborted; + spin_unlock(&fs_info->trans_lock); - return cur_trans->aborted; + return abort_error; } if 
(btrfs_blocked_trans_types[cur_trans->state] & type) { spin_unlock(&fs_info->trans_lock); diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 28f5df3b70c8..bf3822b25c58 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -1433,6 +1433,11 @@ static int check_extent_item(struct extent_buffer *leaf, dref_offset, fs_info->sectorsize); return -EUCLEAN; } + if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) { + extent_err(leaf, slot, + "invalid data ref count, should have non-zero value"); + return -EUCLEAN; + } inline_refs += btrfs_extent_data_ref_count(leaf, dref); break; /* Contains parent bytenr and ref count */ @@ -1445,6 +1450,11 @@ static int check_extent_item(struct extent_buffer *leaf, inline_offset, fs_info->sectorsize); return -EUCLEAN; } + if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) { + extent_err(leaf, slot, + "invalid shared data ref count, should have non-zero value"); + return -EUCLEAN; + } inline_refs += btrfs_shared_data_ref_count(leaf, sref); break; default: @@ -1496,8 +1506,18 @@ static int check_simple_keyed_refs(struct extent_buffer *leaf, { u32 expect_item_size = 0; - if (key->type == BTRFS_SHARED_DATA_REF_KEY) + if (key->type == BTRFS_SHARED_DATA_REF_KEY) { + struct btrfs_shared_data_ref *sref; + + sref = btrfs_item_ptr(leaf, slot, struct btrfs_shared_data_ref); + if (unlikely(btrfs_shared_data_ref_count(leaf, sref) == 0)) { + extent_err(leaf, slot, + "invalid shared data backref count, should have non-zero value"); + return -EUCLEAN; + } + expect_item_size = sizeof(struct btrfs_shared_data_ref); + } if (unlikely(btrfs_item_size(leaf, slot) != expect_item_size)) { generic_err(leaf, slot, @@ -1557,6 +1577,11 @@ static int check_extent_data_ref(struct extent_buffer *leaf, offset, leaf->fs_info->sectorsize); return -EUCLEAN; } + if (unlikely(btrfs_extent_data_ref_count(leaf, dref) == 0)) { + extent_err(leaf, slot, + "invalid extent data backref count, should have non-zero value"); + return -EUCLEAN; + } } return 0; } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8c7e74499ed1..a4177014eb8b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -671,6 +671,42 @@ u8 *btrfs_sb_fsid_ptr(struct btrfs_super_block *sb) return has_metadata_uuid ? sb->metadata_uuid : sb->fsid; } +static bool is_same_device(struct btrfs_device *device, const char *new_path) +{ + struct path old = { .mnt = NULL, .dentry = NULL }; + struct path new = { .mnt = NULL, .dentry = NULL }; + char *old_path = NULL; + bool is_same = false; + int ret; + + if (!device->name) + goto out; + + old_path = kzalloc(PATH_MAX, GFP_NOFS); + if (!old_path) + goto out; + + rcu_read_lock(); + ret = strscpy(old_path, rcu_str_deref(device->name), PATH_MAX); + rcu_read_unlock(); + if (ret < 0) + goto out; + + ret = kern_path(old_path, LOOKUP_FOLLOW, &old); + if (ret) + goto out; + ret = kern_path(new_path, LOOKUP_FOLLOW, &new); + if (ret) + goto out; + if (path_equal(&old, &new)) + is_same = true; +out: + kfree(old_path); + path_put(&old); + path_put(&new); + return is_same; +} + /* * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices * being created with a disk that has already completed its fsid change. 
Such @@ -889,7 +925,7 @@ static noinline struct btrfs_device *device_list_add(const char *path, disk_super->fsid, devid, found_transid, path, current->comm, task_pid_nr(current)); - } else if (!device->name || strcmp(device->name->str, path)) { + } else if (!device->name || !is_same_device(device, path)) { /* * When FS is already mounted. * 1. If you are here and if the device->name is NULL that @@ -2731,8 +2767,6 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); if (seeding_dev) { - btrfs_clear_sb_rdonly(sb); - /* GFP_KERNEL allocation must not be under device_list_mutex */ seed_devices = btrfs_init_sprout(fs_info); if (IS_ERR(seed_devices)) { @@ -2875,8 +2909,6 @@ error_sysfs: mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&fs_info->fs_devices->device_list_mutex); error_trans: - if (seeding_dev) - btrfs_set_sb_rdonly(sb); if (trans) btrfs_end_transaction(trans); error_free_zone: diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 794526ab90d2..1dff64e62047 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -1909,6 +1909,9 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group) device = map->stripes[i].dev; physical = map->stripes[i].physical; + if (!device->bdev) + continue; + if (device->zone_info->max_active_zones == 0) continue; @@ -2052,6 +2055,9 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ struct btrfs_device *device = map->stripes[i].dev; const u64 physical = map->stripes[i].physical; + if (!device->bdev) + continue; + if (device->zone_info->max_active_zones == 0) continue; diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index 89b11336a836..1806bff8e59b 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -576,7 +577,7 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args) */ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args) { - char *secctx; + int err; _enter(",%s", args); @@ -585,16 +586,16 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args) return -EINVAL; } - if (cache->secctx) { + if (cache->have_secid) { pr_err("Second security context specified\n"); return -EINVAL; } - secctx = kstrdup(args, GFP_KERNEL); - if (!secctx) - return -ENOMEM; + err = security_secctx_to_secid(args, strlen(args), &cache->secid); + if (err) + return err; - cache->secctx = secctx; + cache->have_secid = true; return 0; } @@ -820,7 +821,6 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache) put_cred(cache->cache_cred); kfree(cache->rootdirname); - kfree(cache->secctx); kfree(cache->tag); _leave(""); diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index bde23e156a63..fd71775c703a 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -327,6 +327,8 @@ static void cachefiles_commit_object(struct cachefiles_object *object, static void cachefiles_clean_up_object(struct cachefiles_object *object, struct cachefiles_cache *cache) { + struct file *file; + if (test_bit(FSCACHE_COOKIE_RETIRED, &object->cookie->flags)) { if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) { cachefiles_see_object(object, cachefiles_obj_see_clean_delete); @@ -342,10 +344,14 @@ static void cachefiles_clean_up_object(struct cachefiles_object *object, } cachefiles_unmark_inode_in_use(object, object->file); - if (object->file) { - 
fput(object->file); - object->file = NULL; - } + + spin_lock(&object->lock); + file = object->file; + object->file = NULL; + spin_unlock(&object->lock); + + if (file) + fput(file); } /* diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 111ad6ecd4ba..4421a12960a6 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -122,7 +122,6 @@ struct cachefiles_cache { #define CACHEFILES_STATE_CHANGED 3 /* T if state changed (poll trigger) */ #define CACHEFILES_ONDEMAND_MODE 4 /* T if in on-demand read mode */ char *rootdirname; /* name of cache root directory */ - char *secctx; /* LSM security context */ char *tag; /* cache binding tag */ refcount_t unbind_pincount;/* refcount to do daemon unbind */ struct xarray reqs; /* xarray of pending on-demand requests */ @@ -130,6 +129,8 @@ struct cachefiles_cache { struct xarray ondemand_ids; /* xarray for ondemand_id allocation */ u32 ondemand_id_next; u32 msg_id_next; + u32 secid; /* LSM security id */ + bool have_secid; /* whether "secid" was set */ }; static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache) diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 2185e2908dba..3389a373faf6 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -61,26 +61,36 @@ static ssize_t cachefiles_ondemand_fd_write_iter(struct kiocb *kiocb, { struct cachefiles_object *object = kiocb->ki_filp->private_data; struct cachefiles_cache *cache = object->volume->cache; - struct file *file = object->file; + struct file *file; size_t len = iter->count; loff_t pos = kiocb->ki_pos; const struct cred *saved_cred; int ret; - if (!file) + spin_lock(&object->lock); + file = object->file; + if (!file) { + spin_unlock(&object->lock); return -ENOBUFS; + } + get_file(file); + spin_unlock(&object->lock); cachefiles_begin_secure(cache, &saved_cred); ret = __cachefiles_prepare_write(object, file, &pos, &len, true); cachefiles_end_secure(cache, saved_cred); if (ret < 0) - return ret; + goto out; trace_cachefiles_ondemand_fd_write(object, file_inode(file), pos, len); ret = __cachefiles_write(object, file, pos, iter, NULL, NULL); - if (!ret) + if (!ret) { ret = len; + kiocb->ki_pos += ret; + } +out: + fput(file); return ret; } @@ -88,12 +98,22 @@ static loff_t cachefiles_ondemand_fd_llseek(struct file *filp, loff_t pos, int whence) { struct cachefiles_object *object = filp->private_data; - struct file *file = object->file; + struct file *file; + loff_t ret; - if (!file) + spin_lock(&object->lock); + file = object->file; + if (!file) { + spin_unlock(&object->lock); return -ENOBUFS; + } + get_file(file); + spin_unlock(&object->lock); - return vfs_llseek(file, pos, whence); + ret = vfs_llseek(file, pos, whence); + fput(file); + + return ret; } static long cachefiles_ondemand_fd_ioctl(struct file *filp, unsigned int ioctl, diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c index fe777164f1d8..fc6611886b3b 100644 --- a/fs/cachefiles/security.c +++ b/fs/cachefiles/security.c @@ -18,7 +18,7 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache) struct cred *new; int ret; - _enter("{%s}", cache->secctx); + _enter("{%u}", cache->have_secid ? 
cache->secid : 0); new = prepare_kernel_cred(current); if (!new) { @@ -26,8 +26,8 @@ int cachefiles_get_security_ID(struct cachefiles_cache *cache) goto error; } - if (cache->secctx) { - ret = set_security_override_from_ctx(new, cache->secctx); + if (cache->have_secid) { + ret = set_security_override(new, cache->secid); if (ret < 0) { put_cred(new); pr_err("Security denies permission to nominate security context: error %d\n", diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index da9fcf48ab6c..741ca7d10032 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2447,12 +2447,11 @@ retry: if (pos < 0) { /* - * A rename didn't occur, but somehow we didn't end up where - * we thought we would. Throw a warning and try again. + * The path is longer than PATH_MAX and this function + * cannot ever succeed. Creating paths that long is + * possible with Ceph, but Linux cannot use them. */ - pr_warn("build_path did not end path lookup where " - "expected, pos is %d\n", pos); - goto retry; + return ERR_PTR(-ENAMETOOLONG); } *pbase = base; diff --git a/fs/ceph/super.c b/fs/ceph/super.c index aa75aa796e43..bd15991166c2 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -274,7 +274,9 @@ static int ceph_parse_new_source(const char *dev_name, const char *dev_name_end, size_t len; struct ceph_fsid fsid; struct ceph_parse_opts_ctx *pctx = fc->fs_private; + struct ceph_options *opts = pctx->copts; struct ceph_mount_options *fsopt = pctx->opts; + const char *name_start = dev_name; char *fsid_start, *fs_name_start; if (*dev_name_end != '=') { @@ -285,8 +287,14 @@ static int ceph_parse_new_source(const char *dev_name, const char *dev_name_end, fsid_start = strchr(dev_name, '@'); if (!fsid_start) return invalfc(fc, "missing cluster fsid"); - ++fsid_start; /* start of cluster fsid */ + len = fsid_start - name_start; + kfree(opts->name); + opts->name = kstrndup(name_start, len, GFP_KERNEL); + if (!opts->name) + return -ENOMEM; + dout("using %s entity name", opts->name); + ++fsid_start; /* start of cluster fsid */ fs_name_start = strchr(fsid_start, '.'); if (!fs_name_start) return invalfc(fc, "missing file system name"); @@ -412,6 +420,8 @@ static int ceph_parse_mount_param(struct fs_context *fc, switch (token) { case Opt_snapdirname: + if (strlen(param->string) > NAME_MAX) + return invalfc(fc, "snapdirname too long"); kfree(fsopt->snapdir_name); fsopt->snapdir_name = param->string; param->string = NULL; diff --git a/fs/coredump.c b/fs/coredump.c index 4d332f147137..b0d782367fcc 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -56,6 +57,13 @@ static bool dump_vma_snapshot(struct coredump_params *cprm); static void free_vma_snapshot(struct coredump_params *cprm); +/* + * File descriptor number for the pidfd for the thread-group leader of + * the coredumping task installed into the usermode helper's file + * descriptor table. + */ +#define COREDUMP_PIDFD_NUMBER 3 + static int core_uses_pid; static unsigned int core_pipe_limit; static char core_pattern[CORENAME_MAX_SIZE] = "core"; @@ -325,6 +333,27 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, err = cn_printf(cn, "%lu", rlimit(RLIMIT_CORE)); break; + /* pidfd number */ + case 'F': { + /* + * Installing a pidfd only makes sense if + * we actually spawn a usermode helper. + */ + if (!ispipe) + break; + + /* + * Note that we'll install a pidfd for the + * thread-group leader. 
We know that task + linkage hasn't been removed yet and even if + * this @current isn't the actual thread-group + * leader we know that the thread-group leader + * cannot be reaped until @current has exited. + */ + cprm->pid = task_tgid(current); + err = cn_printf(cn, "%d", COREDUMP_PIDFD_NUMBER); + break; + } default: break; } @@ -479,7 +508,7 @@ static void wait_for_dump_helpers(struct file *file) } /* - * umh_pipe_setup + * umh_coredump_setup * helper function to customize the process used * to collect the core in userspace. Specifically * it sets up a pipe and installs it as fd 0 (stdin) @@ -489,21 +518,61 @@ static void wait_for_dump_helpers(struct file *file) * is a special value that we use to trap recursive * core dumps */ -static int umh_pipe_setup(struct subprocess_info *info, struct cred *new) +static int umh_coredump_setup(struct subprocess_info *info, struct cred *new) { struct file *files[2]; + struct file *pidfs_file = NULL; struct coredump_params *cp = (struct coredump_params *)info->data; - int err = create_pipe_files(files, 0); + int err; + + if (cp->pid) { + int fd; + + fd = pidfd_prepare(cp->pid, 0, &pidfs_file); + if (fd < 0) + return fd; + + /* + * We don't care about the fd. We also cannot simply + * replace it below because dup2() will refuse to close + * this file descriptor if it's in a larval state. So + * close it! + */ + put_unused_fd(fd); + + /* + * Usermode helpers are children of either + * system_unbound_wq or of kthreadd. So we know that + * we're starting off with a clean file descriptor + * table. So we should always be able to use + * COREDUMP_PIDFD_NUMBER as our file descriptor value. + */ + err = replace_fd(COREDUMP_PIDFD_NUMBER, pidfs_file, 0); + if (err < 0) + goto out_fail; + + pidfs_file = NULL; + } + + err = create_pipe_files(files, 0); if (err) - return err; + goto out_fail; cp->file = files[1]; err = replace_fd(0, files[0], 0); fput(files[0]); + if (err < 0) + goto out_fail; + /* and disallow core files too */ current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1}; + err = 0; + +out_fail: + if (pidfs_file) + fput(pidfs_file); return err; } @@ -580,7 +649,7 @@ void do_coredump(const kernel_siginfo_t *siginfo) } if (cprm.limit == 1) { - /* See umh_coredump_setup() which sets RLIMIT_CORE = 1.
* * Normally core limits are irrelevant to pipes, since * we're not writing to the file system, but we use @@ -625,7 +694,7 @@ void do_coredump(const kernel_siginfo_t *siginfo) retval = -ENOMEM; sub_info = call_usermodehelper_setup(helper_argv[0], helper_argv, NULL, GFP_KERNEL, - umh_pipe_setup, NULL, &cprm); + umh_coredump_setup, NULL, &cprm); if (sub_info) retval = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC); diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 2c797eb519da..51a641822d6c 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -1863,8 +1863,8 @@ static int dlm_tcp_listen_validate(void) { /* We don't support multi-homed hosts */ if (dlm_local_count > 1) { - log_print("TCP protocol can't handle multi-homed hosts, try SCTP"); - return -EINVAL; + log_print("Detect multi-homed hosts but use only the first IP address."); + log_print("Try SCTP, if you want to enable multi-link."); } return 0; diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c index 939e5e242b98..b3dc7ff42400 100644 --- a/fs/efivarfs/inode.c +++ b/fs/efivarfs/inode.c @@ -47,7 +47,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb, * * VariableName-12345678-1234-1234-1234-1234567891bc */ -bool efivarfs_valid_name(const char *str, int len) +static bool efivarfs_valid_name(const char *str, int len) { const char *s = str + len - EFI_VARIABLE_GUID_LEN; diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h index 8ebf3a6a8aa2..dcb973d8736c 100644 --- a/fs/efivarfs/internal.h +++ b/fs/efivarfs/internal.h @@ -50,7 +50,6 @@ bool efivar_variable_is_removable(efi_guid_t vendor, const char *name, extern const struct file_operations efivarfs_file_operations; extern const struct inode_operations efivarfs_dir_inode_operations; -extern bool efivarfs_valid_name(const char *str, int len); extern struct inode *efivarfs_get_inode(struct super_block *sb, const struct inode *dir, int mode, dev_t dev, bool is_removable); diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 77c7615bba5e..b8c4641ed152 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -64,9 +64,6 @@ static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr) const unsigned char *s = qstr->name; unsigned int len = qstr->len; - if (!efivarfs_valid_name(s, len)) - return -EINVAL; - while (len-- > EFI_VARIABLE_GUID_LEN) hash = partial_name_hash(*s++, hash); diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index 44876a97cabd..1a4a614895c6 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -82,32 +82,27 @@ struct erofs_super_block { }; /* - * erofs inode datalayout (i_format in on-disk inode): + * EROFS inode datalayout (i_format in on-disk inode): * 0 - uncompressed flat inode without tail-packing inline data: - * inode, [xattrs], ... | ... | no-holed data * 1 - compressed inode with non-compact indexes: - * inode, [xattrs], [map_header], extents ... | ... * 2 - uncompressed flat inode with tail-packing inline data: - * inode, [xattrs], tailpacking data, ... | ... | no-holed data * 3 - compressed inode with compact indexes: - * inode, [xattrs], map_header, extents ... | ... * 4 - chunk-based inode with (optional) multi-device support: - * inode, [xattrs], chunk indexes ... | ... 
* 5~7 - reserved */ enum { EROFS_INODE_FLAT_PLAIN = 0, - EROFS_INODE_FLAT_COMPRESSION_LEGACY = 1, + EROFS_INODE_COMPRESSED_FULL = 1, EROFS_INODE_FLAT_INLINE = 2, - EROFS_INODE_FLAT_COMPRESSION = 3, + EROFS_INODE_COMPRESSED_COMPACT = 3, EROFS_INODE_CHUNK_BASED = 4, EROFS_INODE_DATALAYOUT_MAX }; static inline bool erofs_inode_is_data_compressed(unsigned int datamode) { - return datamode == EROFS_INODE_FLAT_COMPRESSION || - datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY; + return datamode == EROFS_INODE_COMPRESSED_COMPACT || + datamode == EROFS_INODE_COMPRESSED_FULL; } /* bit definitions of inode i_format */ @@ -128,11 +123,30 @@ static inline bool erofs_inode_is_data_compressed(unsigned int datamode) #define EROFS_CHUNK_FORMAT_ALL \ (EROFS_CHUNK_FORMAT_BLKBITS_MASK | EROFS_CHUNK_FORMAT_INDEXES) +/* 32-byte on-disk inode */ +#define EROFS_INODE_LAYOUT_COMPACT 0 +/* 64-byte on-disk inode */ +#define EROFS_INODE_LAYOUT_EXTENDED 1 + struct erofs_inode_chunk_info { __le16 format; /* chunk blkbits, etc. */ __le16 reserved; }; +union erofs_inode_i_u { + /* total compressed blocks for compressed inodes */ + __le32 compressed_blocks; + + /* block address for uncompressed flat inodes */ + __le32 raw_blkaddr; + + /* for device files, used to indicate old/new device # */ + __le32 rdev; + + /* for chunk-based files, it contains the summary info */ + struct erofs_inode_chunk_info c; +}; + /* 32-byte reduced form of an ondisk inode */ struct erofs_inode_compact { __le16 i_format; /* inode format hints */ @@ -143,29 +157,14 @@ struct erofs_inode_compact { __le16 i_nlink; __le32 i_size; __le32 i_reserved; - union { - /* total compressed blocks for compressed inodes */ - __le32 compressed_blocks; - /* block address for uncompressed flat inodes */ - __le32 raw_blkaddr; + union erofs_inode_i_u i_u; - /* for device files, used to indicate old/new device # */ - __le32 rdev; - - /* for chunk-based files, it contains the summary info */ - struct erofs_inode_chunk_info c; - } i_u; - __le32 i_ino; /* only used for 32-bit stat compatibility */ + __le32 i_ino; /* only used for 32-bit stat compatibility */ __le16 i_uid; __le16 i_gid; __le32 i_reserved2; }; -/* 32-byte on-disk inode */ -#define EROFS_INODE_LAYOUT_COMPACT 0 -/* 64-byte on-disk inode */ -#define EROFS_INODE_LAYOUT_EXTENDED 1 - /* 64-byte complete form of an ondisk inode */ struct erofs_inode_extended { __le16 i_format; /* inode format hints */ @@ -175,22 +174,9 @@ struct erofs_inode_extended { __le16 i_mode; __le16 i_reserved; __le64 i_size; - union { - /* total compressed blocks for compressed inodes */ - __le32 compressed_blocks; - /* block address for uncompressed flat inodes */ - __le32 raw_blkaddr; - - /* for device files, used to indicate old/new device # */ - __le32 rdev; - - /* for chunk-based files, it contains the summary info */ - struct erofs_inode_chunk_info c; - } i_u; - - /* only used for 32-bit stat compatibility */ - __le32 i_ino; + union erofs_inode_i_u i_u; + __le32 i_ino; /* only used for 32-bit stat compatibility */ __le32 i_uid; __le32 i_gid; __le64 i_mtime; @@ -199,10 +185,6 @@ struct erofs_inode_extended { __u8 i_reserved2[16]; }; -#define EROFS_MAX_SHARED_XATTRS (128) -/* h_shared_count between 129 ... 
255 are special # */ -#define EROFS_SHARED_XATTR_EXTENT (255) - /* * inline xattrs (n == i_xattr_icount): * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes @@ -268,6 +250,22 @@ struct erofs_inode_chunk_index { __le32 blkaddr; /* start block address of this inode chunk */ }; +/* dirent sorts in alphabet order, thus we can do binary search */ +struct erofs_dirent { + __le64 nid; /* node number */ + __le16 nameoff; /* start offset of file name */ + __u8 file_type; /* file type */ + __u8 reserved; /* reserved */ +} __packed; + +/* + * EROFS file types should match generic FT_* types and + * it seems no need to add BUILD_BUG_ONs since potential + * unmatchness will break other fses as well... + */ + +#define EROFS_NAME_LEN 255 + /* maximum supported size of a physical compression cluster */ #define Z_EROFS_PCLUSTER_MAX_SIZE (1024 * 1024) @@ -337,10 +335,8 @@ struct z_erofs_map_header { __u8 h_clusterbits; }; -#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8 - /* - * Fixed-sized output compression on-disk logical cluster type: + * On-disk logical cluster type: * 0 - literal (uncompressed) lcluster * 1,3 - compressed lcluster (for HEAD lclusters) * 2 - compressed lcluster (for NONHEAD lclusters) @@ -364,27 +360,27 @@ struct z_erofs_map_header { * di_u.delta[1] = distance to the next HEAD lcluster */ enum { - Z_EROFS_VLE_CLUSTER_TYPE_PLAIN = 0, - Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 = 1, - Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD = 2, - Z_EROFS_VLE_CLUSTER_TYPE_HEAD2 = 3, - Z_EROFS_VLE_CLUSTER_TYPE_MAX + Z_EROFS_LCLUSTER_TYPE_PLAIN = 0, + Z_EROFS_LCLUSTER_TYPE_HEAD1 = 1, + Z_EROFS_LCLUSTER_TYPE_NONHEAD = 2, + Z_EROFS_LCLUSTER_TYPE_HEAD2 = 3, + Z_EROFS_LCLUSTER_TYPE_MAX }; -#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2 -#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0 +#define Z_EROFS_LI_LCLUSTER_TYPE_BITS 2 +#define Z_EROFS_LI_LCLUSTER_TYPE_BIT 0 /* (noncompact only, HEAD) This pcluster refers to partial decompressed data */ -#define Z_EROFS_VLE_DI_PARTIAL_REF (1 << 15) +#define Z_EROFS_LI_PARTIAL_REF (1 << 15) /* * D0_CBLKCNT will be marked _only_ at the 1st non-head lcluster to store the * compressed block count of a compressed extent (in logical clusters, aka. * block count of a pcluster). */ -#define Z_EROFS_VLE_DI_D0_CBLKCNT (1 << 11) +#define Z_EROFS_LI_D0_CBLKCNT (1 << 11) -struct z_erofs_vle_decompressed_index { +struct z_erofs_lcluster_index { __le16 di_advise; /* where to decompress in the head lcluster */ __le16 di_clusterofs; @@ -401,25 +397,8 @@ struct z_erofs_vle_decompressed_index { } di_u; }; -#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \ - (round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \ - sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING) - -/* dirent sorts in alphabet order, thus we can do binary search */ -struct erofs_dirent { - __le64 nid; /* node number */ - __le16 nameoff; /* start offset of file name */ - __u8 file_type; /* file type */ - __u8 reserved; /* reserved */ -} __packed; - -/* - * EROFS file types should match generic FT_* types and - * it seems no need to add BUILD_BUG_ONs since potential - * unmatchness will break other fses as well... 
- */ - -#define EROFS_NAME_LEN 255 +#define Z_EROFS_FULL_INDEX_ALIGN(end) \ + (ALIGN(end, 8) + sizeof(struct z_erofs_map_header) + 8) /* check the EROFS on-disk layout strictly at compile time */ static inline void erofs_check_ondisk_layout_definitions(void) @@ -436,15 +415,15 @@ static inline void erofs_check_ondisk_layout_definitions(void) BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_info) != 4); BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_index) != 8); BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8); - BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8); + BUILD_BUG_ON(sizeof(struct z_erofs_lcluster_index) != 8); BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12); /* keep in sync between 2 index structures for better extendibility */ BUILD_BUG_ON(sizeof(struct erofs_inode_chunk_index) != - sizeof(struct z_erofs_vle_decompressed_index)); + sizeof(struct z_erofs_lcluster_index)); BUILD_BUG_ON(sizeof(struct erofs_deviceslot) != 128); - BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) < - Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1); + BUILD_BUG_ON(BIT(Z_EROFS_LI_LCLUSTER_TYPE_BITS) < + Z_EROFS_LCLUSTER_TYPE_MAX - 1); /* exclude old compiler versions like gcc 7.5.0 */ BUILD_BUG_ON(__builtin_constant_p(fmh) ? fmh != cpu_to_le64(1ULL << 63) : 0); diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 183e7a4b8259..2cd70cf4c8b2 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -14,7 +14,7 @@ int z_erofs_fill_inode(struct inode *inode) if (!erofs_sb_has_big_pcluster(sbi) && !erofs_sb_has_ztailpacking(sbi) && !erofs_sb_has_fragments(sbi) && - vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) { + vi->datalayout == EROFS_INODE_COMPRESSED_FULL) { vi->z_advise = 0; vi->z_algorithmtype[0] = 0; vi->z_algorithmtype[1] = 0; @@ -45,11 +45,10 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m, { struct inode *const inode = m->inode; struct erofs_inode *const vi = EROFS_I(inode); - const erofs_off_t pos = - Z_EROFS_VLE_LEGACY_INDEX_ALIGN(erofs_iloc(inode) + - vi->inode_isize + vi->xattr_isize) + - lcn * sizeof(struct z_erofs_vle_decompressed_index); - struct z_erofs_vle_decompressed_index *di; + const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(erofs_iloc(inode) + + vi->inode_isize + vi->xattr_isize) + + lcn * sizeof(struct z_erofs_lcluster_index); + struct z_erofs_lcluster_index *di; unsigned int advise, type; m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb, @@ -57,33 +56,33 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m, if (IS_ERR(m->kaddr)) return PTR_ERR(m->kaddr); - m->nextpackoff = pos + sizeof(struct z_erofs_vle_decompressed_index); + m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index); m->lcn = lcn; di = m->kaddr + erofs_blkoff(inode->i_sb, pos); advise = le16_to_cpu(di->di_advise); - type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) & - ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1); + type = (advise >> Z_EROFS_LI_LCLUSTER_TYPE_BIT) & + ((1 << Z_EROFS_LI_LCLUSTER_TYPE_BITS) - 1); switch (type) { - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + case Z_EROFS_LCLUSTER_TYPE_NONHEAD: m->clusterofs = 1 << vi->z_logical_clusterbits; m->delta[0] = le16_to_cpu(di->di_u.delta[0]); - if (m->delta[0] & Z_EROFS_VLE_DI_D0_CBLKCNT) { + if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) { if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 | Z_EROFS_ADVISE_BIG_PCLUSTER_2))) { DBG_BUGON(1); return -EFSCORRUPTED; } m->compressedblks = m->delta[0] & - ~Z_EROFS_VLE_DI_D0_CBLKCNT; + ~Z_EROFS_LI_D0_CBLKCNT; m->delta[0] = 1; } m->delta[1] = 
le16_to_cpu(di->di_u.delta[1]); break; - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1: - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2: - if (advise & Z_EROFS_VLE_DI_PARTIAL_REF) + case Z_EROFS_LCLUSTER_TYPE_PLAIN: + case Z_EROFS_LCLUSTER_TYPE_HEAD1: + case Z_EROFS_LCLUSTER_TYPE_HEAD2: + if (advise & Z_EROFS_LI_PARTIAL_REF) m->partialref = true; m->clusterofs = le16_to_cpu(di->di_clusterofs); if (m->clusterofs >= 1 << vi->z_logical_clusterbits) { @@ -125,13 +124,13 @@ static int get_compacted_la_distance(unsigned int lclusterbits, lo = decode_compactedbits(lclusterbits, lomask, in, encodebits * i, &type); - if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) + if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) return d1; ++d1; } while (++i < vcnt); - /* vcnt - 1 (Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) item */ - if (!(lo & Z_EROFS_VLE_DI_D0_CBLKCNT)) + /* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */ + if (!(lo & Z_EROFS_LI_D0_CBLKCNT)) d1 += lo - 1; return d1; } @@ -169,19 +168,19 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, lo = decode_compactedbits(lclusterbits, lomask, in, encodebits * i, &type); m->type = type; - if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { + if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { m->clusterofs = 1 << lclusterbits; /* figure out lookahead_distance: delta[1] if needed */ if (lookahead) m->delta[1] = get_compacted_la_distance(lclusterbits, encodebits, vcnt, in, i); - if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) { + if (lo & Z_EROFS_LI_D0_CBLKCNT) { if (!big_pcluster) { DBG_BUGON(1); return -EFSCORRUPTED; } - m->compressedblks = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT; + m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT; m->delta[0] = 1; return 0; } else if (i + 1 != (int)vcnt) { @@ -195,9 +194,9 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, */ lo = decode_compactedbits(lclusterbits, lomask, in, encodebits * (i - 1), &type); - if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) + if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) lo = 0; - else if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) + else if (lo & Z_EROFS_LI_D0_CBLKCNT) lo = 1; m->delta[0] = lo + 1; return 0; @@ -211,7 +210,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, --i; lo = decode_compactedbits(lclusterbits, lomask, in, encodebits * i, &type); - if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) + if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) i -= lo; if (i >= 0) @@ -223,10 +222,10 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, --i; lo = decode_compactedbits(lclusterbits, lomask, in, encodebits * i, &type); - if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { - if (lo & Z_EROFS_VLE_DI_D0_CBLKCNT) { + if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { + if (lo & Z_EROFS_LI_D0_CBLKCNT) { --i; - nblk += lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT; + nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT; continue; } /* bigpcluster shouldn't have plain d0 == 1 */ @@ -257,7 +256,7 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m, unsigned int amortizedshift; erofs_off_t pos; - if (lcn >= totalidx) + if (lcn >= totalidx || vi->z_logical_clusterbits > 14) return -EINVAL; m->lcn = lcn; @@ -301,10 +300,10 @@ static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m, { const unsigned int datamode = EROFS_I(m->inode)->datalayout; - if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) + if (datamode == EROFS_INODE_COMPRESSED_FULL) return legacy_load_cluster_from_disk(m, lcn); - if (datamode == EROFS_INODE_FLAT_COMPRESSION) + if (datamode == 
EROFS_INODE_COMPRESSED_COMPACT) return compacted_load_cluster_from_disk(m, lcn, lookahead); return -EINVAL; @@ -326,7 +325,7 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m, return err; switch (m->type) { - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + case Z_EROFS_LCLUSTER_TYPE_NONHEAD: if (!m->delta[0]) { erofs_err(m->inode->i_sb, "invalid lookback distance 0 @ nid %llu", @@ -336,9 +335,9 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m, } lookback_distance = m->delta[0]; continue; - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1: - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2: + case Z_EROFS_LCLUSTER_TYPE_PLAIN: + case Z_EROFS_LCLUSTER_TYPE_HEAD1: + case Z_EROFS_LCLUSTER_TYPE_HEAD2: m->headtype = m->type; m->map->m_la = (lcn << lclusterbits) | m->clusterofs; return 0; @@ -367,15 +366,15 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m, unsigned long lcn; int err; - DBG_BUGON(m->type != Z_EROFS_VLE_CLUSTER_TYPE_PLAIN && - m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 && - m->type != Z_EROFS_VLE_CLUSTER_TYPE_HEAD2); + DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN && + m->type != Z_EROFS_LCLUSTER_TYPE_HEAD1 && + m->type != Z_EROFS_LCLUSTER_TYPE_HEAD2); DBG_BUGON(m->type != m->headtype); - if (m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN || - ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1) && + if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN || + ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) && !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) || - ((m->headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) && + ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) && !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) { map->m_plen = 1ULL << lclusterbits; return 0; @@ -397,19 +396,19 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m, * BUG_ON in the debugging mode only for developers to notice that. */ DBG_BUGON(lcn == initial_lcn && - m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD); + m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD); switch (m->type) { - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1: - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2: + case Z_EROFS_LCLUSTER_TYPE_PLAIN: + case Z_EROFS_LCLUSTER_TYPE_HEAD1: + case Z_EROFS_LCLUSTER_TYPE_HEAD2: /* * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type * rather than CBLKCNT, it's a 1 lcluster-sized pcluster. 
*/ m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits); break; - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + case Z_EROFS_LCLUSTER_TYPE_NONHEAD: if (m->delta[0] != 1) goto err_bonus_cblkcnt; if (m->compressedblks) @@ -442,7 +441,7 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m) u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits; int err; - do { + while (1) { /* handle the last EOF pcluster (no next HEAD lcluster) */ if ((lcn << lclusterbits) >= inode->i_size) { map->m_llen = inode->i_size - map->m_la; @@ -453,15 +452,17 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m) if (err) return err; - if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { - DBG_BUGON(!m->delta[1] && - m->clusterofs != 1 << lclusterbits); - } else if (m->type == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN || - m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD1 || - m->type == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2) { - /* go on until the next HEAD lcluster */ + if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { + /* work around invalid d1 generated by pre-1.0 mkfs */ + if (unlikely(!m->delta[1])) { + m->delta[1] = 1; + DBG_BUGON(1); + } + } else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN || + m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 || + m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) { if (lcn != headlcn) - break; + break; /* ends at the next HEAD lcluster */ m->delta[1] = 1; } else { erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu", @@ -470,15 +471,13 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m) return -EOPNOTSUPP; } lcn += m->delta[1]; - } while (m->delta[1]); - + } map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la; return 0; } static int z_erofs_do_map_blocks(struct inode *inode, - struct erofs_map_blocks *map, - int flags) + struct erofs_map_blocks *map, int flags) { struct erofs_inode *const vi = EROFS_I(inode); bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER; @@ -508,9 +507,9 @@ static int z_erofs_do_map_blocks(struct inode *inode, end = (m.lcn + 1ULL) << lclusterbits; switch (m.type) { - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD1: - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD2: + case Z_EROFS_LCLUSTER_TYPE_PLAIN: + case Z_EROFS_LCLUSTER_TYPE_HEAD1: + case Z_EROFS_LCLUSTER_TYPE_HEAD2: if (endoff >= m.clusterofs) { m.headtype = m.type; map->m_la = (m.lcn << lclusterbits) | m.clusterofs; @@ -535,7 +534,7 @@ static int z_erofs_do_map_blocks(struct inode *inode, map->m_flags |= EROFS_MAP_FULL_MAPPED; m.delta[0] = 1; fallthrough; - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + case Z_EROFS_LCLUSTER_TYPE_NONHEAD: /* get the corresponding first chunk */ err = z_erofs_extent_lookback(&m, m.delta[0]); if (err) @@ -556,7 +555,7 @@ static int z_erofs_do_map_blocks(struct inode *inode, vi->z_tailextent_headlcn = m.lcn; /* for non-compact indexes, fragmentoff is 64 bits */ if (fragment && - vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) + vi->datalayout == EROFS_INODE_COMPRESSED_FULL) vi->z_fragmentoff |= (u64)m.pblk << 32; } if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) { @@ -572,7 +571,7 @@ static int z_erofs_do_map_blocks(struct inode *inode, goto unmap_out; } - if (m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_PLAIN) { + if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) { if (map->m_llen > map->m_plen) { DBG_BUGON(1); err = -EFSCORRUPTED; @@ -582,7 +581,7 @@ static int z_erofs_do_map_blocks(struct inode *inode, Z_EROFS_COMPRESSION_INTERLACED : Z_EROFS_COMPRESSION_SHIFTED; } else { - afmt 
= m.headtype == Z_EROFS_VLE_CLUSTER_TYPE_HEAD2 ? + afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ? vi->z_algorithmtype[1] : vi->z_algorithmtype[0]; if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) { erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu", @@ -676,7 +675,7 @@ static int z_erofs_fill_inode_lazy(struct inode *inode) err = -EFSCORRUPTED; goto out_put_metabuf; } - if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION && + if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT && !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^ !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) { erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu", diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 7221072f39fa..6101dbe28344 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -703,7 +703,8 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi) to_free = NULL; head = file->f_ep; if (head->first == &epi->fllink && !epi->fllink.next) { - file->f_ep = NULL; + /* See eventpoll_release() for details. */ + WRITE_ONCE(file->f_ep, NULL); if (!is_file_epoll(file)) { struct epitems_head *v; v = container_of(head, struct epitems_head, epitems); @@ -1236,7 +1237,10 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v break; } } - wake_up(&ep->wq); + if (sync) + wake_up_sync(&ep->wq); + else + wake_up(&ep->wq); } if (waitqueue_active(&ep->poll_wait)) pwake++; @@ -1467,7 +1471,8 @@ allocate: spin_unlock(&file->f_lock); goto allocate; } - file->f_ep = head; + /* See eventpoll_release() for details. */ + WRITE_ONCE(file->f_ep, head); to_free = NULL; } hlist_add_head_rcu(&epi->fllink, file->f_ep); diff --git a/fs/exec.c b/fs/exec.c index a42c9b8b070d..b65af8f9a4f9 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -169,7 +169,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library) exit: fput(file); out: - return error; + return error; } #endif /* #ifdef CONFIG_USELIB */ @@ -1362,7 +1362,28 @@ int begin_new_exec(struct linux_binprm * bprm) set_dumpable(current->mm, SUID_DUMP_USER); perf_event_exec(); - __set_task_comm(me, kbasename(bprm->filename), true); + + /* + * If the original filename was empty, alloc_bprm() made up a path + * that will probably not be useful to admins running ps or similar. + * Let's fix it up to be something reasonable. + */ + if (bprm->comm_from_dentry) { + /* + * Hold RCU lock to keep the name from being freed behind our back. + * Use acquire semantics to make sure the terminating NUL from + * __d_alloc() is seen. + * + * Note, we're deliberately sloppy here. We don't need to care about + * detecting a concurrent rename and just want a terminated name. + */ + rcu_read_lock(); + __set_task_comm(me, smp_load_acquire(&bprm->file->f_path.dentry->d_name.name), + true); + rcu_read_unlock(); + } else { + __set_task_comm(me, kbasename(bprm->filename), true); + } /* An exec changes our domain. 
We are no longer part of the thread group */ @@ -1521,11 +1542,13 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename) if (fd == AT_FDCWD || filename->name[0] == '/') { bprm->filename = filename->name; } else { - if (filename->name[0] == '\0') + if (filename->name[0] == '\0') { bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd); - else + bprm->comm_from_dentry = 1; + } else { bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s", fd, filename->name); + } if (!bprm->fdpath) goto out_free; diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c index 5b547a596380..32209acd51be 100644 --- a/fs/exfat/balloc.c +++ b/fs/exfat/balloc.c @@ -160,7 +160,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync) return 0; } -void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) +int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) { int i, b; unsigned int ent_idx; @@ -169,13 +169,17 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) struct exfat_mount_options *opts = &sbi->options; if (!is_valid_cluster(sbi, clu)) - return; + return -EIO; ent_idx = CLUSTER_TO_BITMAP_ENT(clu); i = BITMAP_OFFSET_SECTOR_INDEX(sb, ent_idx); b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx); + if (!test_bit_le(b, sbi->vol_amap[i]->b_data)) + return -EIO; + clear_bit_le(b, sbi->vol_amap[i]->b_data); + exfat_update_bh(sbi->vol_amap[i], sync); if (opts->discard) { @@ -190,6 +194,8 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync) opts->discard = 0; } } + + return 0; } /* diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 51b03b0dd5f7..e651ffb06a43 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -125,7 +125,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent type = exfat_get_entry_type(ep); if (type == TYPE_UNUSED) { brelse(bh); - break; + goto out; } if (type != TYPE_FILE && type != TYPE_DIR) { @@ -185,6 +185,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent } } +out: dir_entry->namebuf.lfn[0] = '\0'; *cpos = EXFAT_DEN_TO_B(dentry); return 0; @@ -613,6 +614,10 @@ int exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync) bforget(es->bh[i]); else brelse(es->bh[i]); + + if (IS_DYNAMIC_ES(es)) + kfree(es->bh); + kfree(es); return err; } @@ -845,6 +850,7 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb, /* byte offset in sector */ off = EXFAT_BLK_OFFSET(byte_offset, sb); es->start_off = off; + es->bh = es->__bh; /* sector offset in cluster */ sec = EXFAT_B_TO_BLK(byte_offset, sb); @@ -864,6 +870,16 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb, es->num_entries = num_entries; num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb); + if (num_bh > ARRAY_SIZE(es->__bh)) { + es->bh = kmalloc_array(num_bh, sizeof(*es->bh), GFP_NOFS); + if (!es->bh) { + brelse(bh); + kfree(es); + return NULL; + } + es->bh[0] = bh; + } + for (i = 1; i < num_bh; i++) { /* get the next sector */ if (exfat_is_last_sector_in_cluster(sbi, sec)) { diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index e0af6ace633c..5a1251207ab2 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -169,10 +169,13 @@ struct exfat_entry_set_cache { bool modified; unsigned int start_off; int num_bh; - struct buffer_head *bh[DIR_CACHE_SIZE]; + struct buffer_head *__bh[DIR_CACHE_SIZE]; + struct buffer_head **bh; unsigned int num_entries; }; +#define IS_DYNAMIC_ES(es) ((es)->__bh != 
(es)->bh) + struct exfat_dir_entry { struct exfat_chain dir; int entry; @@ -416,7 +419,7 @@ int exfat_count_num_clusters(struct super_block *sb, int exfat_load_bitmap(struct super_block *sb); void exfat_free_bitmap(struct exfat_sb_info *sbi); int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync); -void exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync); +int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync); unsigned int exfat_find_free_bitmap(struct super_block *sb, unsigned int clu); int exfat_count_used_clusters(struct super_block *sb, unsigned int *ret_count); int exfat_trim_fs(struct inode *inode, struct fstrim_range *range); diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c index 41ae4cce1f42..9fa4cffabfb6 100644 --- a/fs/exfat/fatent.c +++ b/fs/exfat/fatent.c @@ -175,6 +175,7 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain BITMAP_OFFSET_SECTOR_INDEX(sb, CLUSTER_TO_BITMAP_ENT(clu)); if (p_chain->flags == ALLOC_NO_FAT_CHAIN) { + int err; unsigned int last_cluster = p_chain->dir + p_chain->size - 1; do { bool sync = false; @@ -189,7 +190,9 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain cur_cmap_i = next_cmap_i; } - exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))); + err = exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))); + if (err) + break; clu++; num_clusters++; } while (num_clusters < p_chain->size); @@ -210,16 +213,26 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain cur_cmap_i = next_cmap_i; } - exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))); + if (exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode)))) + break; clu = n_clu; num_clusters++; if (err) - goto dec_used_clus; + break; + + if (num_clusters >= sbi->num_clusters - EXFAT_FIRST_CLUSTER) { + /* + * The cluster chain includes a loop, scan the + * bitmap to get the number of used clusters. + */ + exfat_count_used_clusters(sb, &sbi->used_clusters); + + return 0; + } } while (clu != EXFAT_EOF_CLUSTER); } -dec_used_clus: sbi->used_clusters -= num_clusters; return 0; } @@ -252,7 +265,7 @@ int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain, clu = next; if (exfat_ent_get(sb, clu, &next)) return -EIO; - } while (next != EXFAT_EOF_CLUSTER); + } while (next != EXFAT_EOF_CLUSTER && count <= p_chain->size); if (p_chain->size != count) { exfat_fs_error(sb, diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 30e97c51f0e1..51743ea61733 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -364,6 +364,7 @@ static int exfat_find_empty_entry(struct inode *inode, if (ei->start_clu == EXFAT_EOF_CLUSTER) { ei->start_clu = clu.dir; p_dir->dir = clu.dir; + hint_femp.eidx = 0; } /* append to the FAT chain */ diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index fbd0329cf254..9efe97f3721b 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -638,8 +638,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi, /* Hm, nope. Are (enough) root reserved clusters available? 
*/ if (uid_eq(sbi->s_resuid, current_fsuid()) || (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) || - capable(CAP_SYS_RESOURCE) || - (flags & EXT4_MB_USE_ROOT_BLOCKS)) { + (flags & EXT4_MB_USE_ROOT_BLOCKS) || + capable(CAP_SYS_RESOURCE)) { if (free_clusters >= (nclusters + dirty_clusters + resv_clusters)) diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index 6fe3c941b565..4d6ba140276b 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -351,10 +351,9 @@ int ext4_check_blockref(const char *function, unsigned int line, { __le32 *bref = p; unsigned int blk; + journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; - if (ext4_has_feature_journal(inode->i_sb) && - (inode->i_ino == - le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) + if (journal && inode == journal->j_inode) return 0; while (bref < p+max) { diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 7ea33c3fe94e..6682b8ab11f1 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -104,6 +104,9 @@ int __ext4_check_dir_entry(const char *function, unsigned int line, else if (unlikely(le32_to_cpu(de->inode) > le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count))) error_msg = "inode out of bounds"; + else if (unlikely(next_offset == size && de->name_len == 1 && + de->name[0] == '.')) + error_msg = "'.' directory cannot be the last in data block"; else return 0; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 72abb8d6caf7..faa889882e55 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1269,6 +1269,7 @@ struct ext4_inode_info { #define EXT4_MOUNT2_MB_OPTIMIZE_SCAN 0x00000080 /* Optimize group * scanning in mballoc */ +#define EXT4_MOUNT2_ABORT 0x00000100 /* Abort filesystem */ #define clear_opt(sb, opt) EXT4_SB(sb)->s_mount_opt &= \ ~EXT4_MOUNT_##opt diff --git a/fs/ext4/fsmap.c b/fs/ext4/fsmap.c index cdf9bfe10137..53a05b8292f0 100644 --- a/fs/ext4/fsmap.c +++ b/fs/ext4/fsmap.c @@ -185,6 +185,56 @@ static inline ext4_fsblk_t ext4_fsmap_next_pblk(struct ext4_fsmap *fmr) return fmr->fmr_physical + fmr->fmr_length; } +static int ext4_getfsmap_meta_helper(struct super_block *sb, + ext4_group_t agno, ext4_grpblk_t start, + ext4_grpblk_t len, void *priv) +{ + struct ext4_getfsmap_info *info = priv; + struct ext4_fsmap *p; + struct ext4_fsmap *tmp; + struct ext4_sb_info *sbi = EXT4_SB(sb); + ext4_fsblk_t fsb, fs_start, fs_end; + int error; + + fs_start = fsb = (EXT4_C2B(sbi, start) + + ext4_group_first_block_no(sb, agno)); + fs_end = fs_start + EXT4_C2B(sbi, len); + + /* Return relevant extents from the meta_list */ + list_for_each_entry_safe(p, tmp, &info->gfi_meta_list, fmr_list) { + if (p->fmr_physical < info->gfi_next_fsblk) { + list_del(&p->fmr_list); + kfree(p); + continue; + } + if (p->fmr_physical <= fs_start || + p->fmr_physical + p->fmr_length <= fs_end) { + /* Emit the retained free extent record if present */ + if (info->gfi_lastfree.fmr_owner) { + error = ext4_getfsmap_helper(sb, info, + &info->gfi_lastfree); + if (error) + return error; + info->gfi_lastfree.fmr_owner = 0; + } + error = ext4_getfsmap_helper(sb, info, p); + if (error) + return error; + fsb = p->fmr_physical + p->fmr_length; + if (info->gfi_next_fsblk < fsb) + info->gfi_next_fsblk = fsb; + list_del(&p->fmr_list); + kfree(p); + continue; + } + } + if (info->gfi_next_fsblk < fsb) + info->gfi_next_fsblk = fsb; + + return 0; +} + + /* Transform a blockgroup's free record into a fsmap */ static int ext4_getfsmap_datadev_helper(struct super_block *sb, ext4_group_t agno, ext4_grpblk_t start, @@ -539,6 +589,7 @@ 
static int ext4_getfsmap_datadev(struct super_block *sb, error = ext4_mballoc_query_range(sb, info->gfi_agno, EXT4_B2C(sbi, info->gfi_low.fmr_physical), EXT4_B2C(sbi, info->gfi_high.fmr_physical), + ext4_getfsmap_meta_helper, ext4_getfsmap_datadev_helper, info); if (error) goto err; @@ -560,7 +611,8 @@ static int ext4_getfsmap_datadev(struct super_block *sb, /* Report any gaps at the end of the bg */ info->gfi_last = true; - error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster, 0, info); + error = ext4_getfsmap_datadev_helper(sb, end_ag, last_cluster + 1, + 0, info); if (error) goto err; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 694af768ac5b..abe7f769054f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -406,10 +406,11 @@ static int __check_block_validity(struct inode *inode, const char *func, unsigned int line, struct ext4_map_blocks *map) { - if (ext4_has_feature_journal(inode->i_sb) && - (inode->i_ino == - le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) + journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; + + if (journal && inode == journal->j_inode) return 0; + if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) { ext4_error_inode(inode, func, line, map->m_pblk, "lblock %lu mapped to illegal pblock %llu " @@ -4776,22 +4777,43 @@ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val) inode_set_iversion_queried(inode, val); } -static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags) - +static int check_igot_inode(struct inode *inode, ext4_iget_flags flags, + const char *function, unsigned int line) { + const char *err_str; + if (flags & EXT4_IGET_EA_INODE) { - if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) - return "missing EA_INODE flag"; + if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { + err_str = "missing EA_INODE flag"; + goto error; + } if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) || - EXT4_I(inode)->i_file_acl) - return "ea_inode with extended attributes"; + EXT4_I(inode)->i_file_acl) { + err_str = "ea_inode with extended attributes"; + goto error; + } } else { - if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) - return "unexpected EA_INODE flag"; + if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { + /* + * open_by_handle_at() could provide an old inode number + * that has since been reused for an ea_inode; this does + * not indicate filesystem corruption + */ + if (flags & EXT4_IGET_HANDLE) + return -ESTALE; + err_str = "unexpected EA_INODE flag"; + goto error; + } } - if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) - return "unexpected bad inode w/o EXT4_IGET_BAD"; - return NULL; + if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) { + err_str = "unexpected bad inode w/o EXT4_IGET_BAD"; + goto error; + } + return 0; + +error: + ext4_error_inode(inode, function, line, 0, err_str); + return -EFSCORRUPTED; } struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, @@ -4803,7 +4825,6 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, struct ext4_inode_info *ei; struct ext4_super_block *es = EXT4_SB(sb)->s_es; struct inode *inode; - const char *err_str; journal_t *journal = EXT4_SB(sb)->s_journal; long ret; loff_t size; @@ -4832,10 +4853,10 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) { - if ((err_str = check_igot_inode(inode, flags)) != NULL) { - ext4_error_inode(inode, function, line, 0, err_str); + ret = check_igot_inode(inode, flags, 
function, line); + if (ret) { iput(inode); - return ERR_PTR(-EFSCORRUPTED); + return ERR_PTR(ret); } return inode; } @@ -5107,13 +5128,21 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, ret = -EFSCORRUPTED; goto bad_inode; } - if ((err_str = check_igot_inode(inode, flags)) != NULL) { - ext4_error_inode(inode, function, line, 0, err_str); - ret = -EFSCORRUPTED; - goto bad_inode; + ret = check_igot_inode(inode, flags, function, line); + /* + * -ESTALE here means there is nothing inherently wrong with the inode, + * it's just not an inode we can return for an fhandle lookup. + */ + if (ret == -ESTALE) { + brelse(iloc.bh); + unlock_new_inode(inode); + iput(inode); + return ERR_PTR(-ESTALE); } - + if (ret) + goto bad_inode; brelse(iloc.bh); + unlock_new_inode(inode); return inode; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 03b61af25ac1..166273584aa4 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -6685,13 +6685,14 @@ int ext4_mballoc_query_range( struct super_block *sb, ext4_group_t group, - ext4_grpblk_t start, + ext4_grpblk_t first, ext4_grpblk_t end, + ext4_mballoc_query_range_fn meta_formatter, ext4_mballoc_query_range_fn formatter, void *priv) { void *bitmap; - ext4_grpblk_t next; + ext4_grpblk_t start, next; struct ext4_buddy e4b; int error; @@ -6702,10 +6703,19 @@ ext4_mballoc_query_range( ext4_lock_group(sb, group); - start = max(e4b.bd_info->bb_first_free, start); + start = max(e4b.bd_info->bb_first_free, first); if (end >= EXT4_CLUSTERS_PER_GROUP(sb)) end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; - + if (meta_formatter && start != first) { + if (start > end) + start = end; + ext4_unlock_group(sb, group); + error = meta_formatter(sb, group, first, start - first, + priv); + if (error) + goto out_unload; + ext4_lock_group(sb, group); + } while (start <= end) { start = mb_find_next_zero_bit(bitmap, end + 1, start); if (start > end) diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h index 538703499d08..2d95fcab941f 100644 --- a/fs/ext4/mballoc.h +++ b/fs/ext4/mballoc.h @@ -245,6 +245,7 @@ ext4_mballoc_query_range( ext4_group_t agno, ext4_grpblk_t start, ext4_grpblk_t end, + ext4_mballoc_query_range_fn meta_formatter, ext4_mballoc_query_range_fn formatter, void *priv); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index b6a8b6c851cc..b8683c5acffe 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -2041,7 +2041,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, * split it in half by count; each resulting block will have at least * half the space free. */ - if (i > 0) + if (i >= 0) split = count - move; else split = count/2; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 36ed29f7cca5..4dc4505761e6 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -338,9 +338,9 @@ __u32 ext4_free_group_clusters(struct super_block *sb, __u32 ext4_free_inodes_count(struct super_block *sb, struct ext4_group_desc *bg) { - return le16_to_cpu(bg->bg_free_inodes_count_lo) | + return le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_lo)) | (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ? 
- (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0); + (__u32)le16_to_cpu(READ_ONCE(bg->bg_free_inodes_count_hi)) << 16 : 0); } __u32 ext4_used_dirs_count(struct super_block *sb, @@ -394,9 +394,9 @@ void ext4_free_group_clusters_set(struct super_block *sb, void ext4_free_inodes_set(struct super_block *sb, struct ext4_group_desc *bg, __u32 count) { - bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count); + WRITE_ONCE(bg->bg_free_inodes_count_lo, cpu_to_le16((__u16)count)); if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT) - bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16); + WRITE_ONCE(bg->bg_free_inodes_count_hi, cpu_to_le16(count >> 16)); } void ext4_used_dirs_set(struct super_block *sb, @@ -1833,6 +1833,7 @@ static const struct mount_opts { {Opt_fc_debug_force, EXT4_MOUNT2_JOURNAL_FAST_COMMIT, MOPT_SET | MOPT_2 | MOPT_EXT4_ONLY}, #endif + {Opt_abort, EXT4_MOUNT2_ABORT, MOPT_SET | MOPT_2}, {Opt_err, 0, 0} }; @@ -1901,8 +1902,6 @@ struct ext4_fs_context { unsigned int mask_s_mount_opt; unsigned int vals_s_mount_opt2; unsigned int mask_s_mount_opt2; - unsigned long vals_s_mount_flags; - unsigned long mask_s_mount_flags; unsigned int opt_flags; /* MOPT flags */ unsigned int spec; u32 s_max_batch_time; @@ -2053,12 +2052,6 @@ EXT4_SET_CTX(mount_opt2); EXT4_CLEAR_CTX(mount_opt2); EXT4_TEST_CTX(mount_opt2); -static inline void ctx_set_mount_flag(struct ext4_fs_context *ctx, int bit) -{ - set_bit(bit, &ctx->mask_s_mount_flags); - set_bit(bit, &ctx->vals_s_mount_flags); -} - static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param) { struct ext4_fs_context *ctx = fc->fs_private; @@ -2122,9 +2115,6 @@ static int ext4_parse_param(struct fs_context *fc, struct fs_parameter *param) ext4_msg(NULL, KERN_WARNING, "Ignoring removed %s option", param->key); return 0; - case Opt_abort: - ctx_set_mount_flag(ctx, EXT4_MF_FS_ABORTED); - return 0; case Opt_inlinecrypt: #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT ctx_set_flags(ctx, SB_INLINECRYPT); @@ -2751,6 +2741,13 @@ static int ext4_check_opt_consistency(struct fs_context *fc, } if (is_remount) { + if (!sbi->s_journal && + ctx_test_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT)) { + ext4_msg(NULL, KERN_WARNING, + "Remounting fs w/o journal so ignoring data_err option"); + ctx_clear_mount_opt(ctx, EXT4_MOUNT_DATA_ERR_ABORT); + } + if (ctx_test_mount_opt(ctx, EXT4_MOUNT_DAX_ALWAYS) && (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) { ext4_msg(NULL, KERN_ERR, "can't mount with " @@ -2789,8 +2786,6 @@ static void ext4_apply_options(struct fs_context *fc, struct super_block *sb) sbi->s_mount_opt |= ctx->vals_s_mount_opt; sbi->s_mount_opt2 &= ~ctx->mask_s_mount_opt2; sbi->s_mount_opt2 |= ctx->vals_s_mount_opt2; - sbi->s_mount_flags &= ~ctx->mask_s_mount_flags; - sbi->s_mount_flags |= ctx->vals_s_mount_flags; sb->s_flags &= ~ctx->mask_s_flags; sb->s_flags |= ctx->vals_s_flags; @@ -5278,6 +5273,8 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ mutex_init(&sbi->s_orphan_lock); + spin_lock_init(&sbi->s_bdev_wb_lock); + ext4_fast_commit_init(sb); sb->s_root = NULL; @@ -5330,6 +5327,11 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) "data=, fs mounted w/o journal"); goto failed_mount3a; } + if (test_opt(sb, DATA_ERR_ABORT)) { + ext4_msg(sb, KERN_ERR, + "can't mount with data_err=abort, fs mounted w/o journal"); + goto failed_mount3a; + } sbi->s_def_mount_opt &= ~EXT4_MOUNT_JOURNAL_CHECKSUM; clear_opt(sb, 
JOURNAL_CHECKSUM);
 	clear_opt(sb, DATA_FLAGS);
@@ -5528,7 +5530,6 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
 	 * Save the original bdev mapping's wb_err value which could be
 	 * used to detect the metadata async write error.
 	 */
-	spin_lock_init(&sbi->s_bdev_wb_lock);
 	errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err,
 				 &sbi->s_bdev_wb_err);
 	sb->s_bdev->bd_super = sb;
@@ -6447,9 +6448,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
 		goto restore_opts;
 	}
 
-	if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
-		ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
-
 	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
 		(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
 
@@ -6618,6 +6616,14 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
 	if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
 		ext4_stop_mmpd(sbi);
 
+	/*
+	 * Handle aborting the filesystem as the last thing during remount to
+	 * avoid obscure errors during remount when some option changes fail to
+	 * apply due to shutdown filesystem.
+	 */
+	if (test_opt2(sb, ABORT))
+		ext4_abort(sb, ESHUTDOWN, "Abort forced by user");
+
 	return 0;
 
 restore_opts:
@@ -6693,22 +6699,29 @@ static int ext4_statfs_project(struct super_block *sb,
 			     dquot->dq_dqb.dqb_bhardlimit);
 	limit >>= sb->s_blocksize_bits;
 
-	if (limit && buf->f_blocks > limit) {
+	if (limit) {
+		uint64_t remaining = 0;
+
 		curblock = (dquot->dq_dqb.dqb_curspace +
 			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
-		buf->f_blocks = limit;
-		buf->f_bfree = buf->f_bavail =
-			(buf->f_blocks > curblock) ?
-			 (buf->f_blocks - curblock) : 0;
+		if (limit > curblock)
+			remaining = limit - curblock;
+
+		buf->f_blocks = min(buf->f_blocks, limit);
+		buf->f_bfree = min(buf->f_bfree, remaining);
+		buf->f_bavail = min(buf->f_bavail, remaining);
 	}
 
 	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
 			     dquot->dq_dqb.dqb_ihardlimit);
-	if (limit && buf->f_files > limit) {
-		buf->f_files = limit;
-		buf->f_ffree =
-			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
-			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
+	if (limit) {
+		uint64_t remaining = 0;
+
+		if (limit > dquot->dq_dqb.dqb_curinodes)
+			remaining = limit - dquot->dq_dqb.dqb_curinodes;
+
+		buf->f_files = min(buf->f_files, limit);
+		buf->f_ffree = min(buf->f_ffree, remaining);
 	}
 
 	spin_unlock(&dquot->dq_dqb_lock);
@@ -6811,12 +6824,25 @@ static int ext4_release_dquot(struct dquot *dquot)
 {
 	int ret, err;
 	handle_t *handle;
+	bool freeze_protected = false;
+
+	/*
+	 * Trying to sb_start_intwrite() in a running transaction
+	 * can result in a deadlock. Further, running transactions
+	 * are already protected from freezing.
+ */ + if (!ext4_journal_current_handle()) { + sb_start_intwrite(dquot->dq_sb); + freeze_protected = true; + } handle = ext4_journal_start(dquot_to_inode(dquot), EXT4_HT_QUOTA, EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb)); if (IS_ERR(handle)) { /* Release dquot anyway to avoid endless cycle in dqput() */ dquot_release(dquot); + if (freeze_protected) + sb_end_intwrite(dquot->dq_sb); return PTR_ERR(handle); } ret = dquot_release(dquot); @@ -6827,6 +6853,10 @@ static int ext4_release_dquot(struct dquot *dquot) err = ext4_journal_stop(handle); if (!ret) ret = err; + + if (freeze_protected) + sb_end_intwrite(dquot->dq_sb); + return ret; } diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 5598aec75775..95dbc7c9843b 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1127,15 +1127,24 @@ ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent, { struct inode *ea_inode; struct ext4_xattr_entry *entry; + struct ext4_iloc iloc; bool dirty = false; unsigned int ea_ino; int err; int credits; + void *end; + + if (block_csum) + end = (void *)bh->b_data + bh->b_size; + else { + ext4_get_inode_loc(parent, &iloc); + end = (void *)ext4_raw_inode(&iloc) + EXT4_SB(parent->i_sb)->s_inode_size; + } /* One credit for dec ref on ea_inode, one for orphan list addition, */ credits = 2 + extra_credits; - for (entry = first; !IS_LAST_ENTRY(entry); + for (entry = first; (void *)entry < end && !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) { if (!entry->e_value_inum) continue; diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 8f1860a4b1fb..29ac1e77b5d7 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -199,7 +199,8 @@ static unsigned long dir_block_index(unsigned int level, static struct f2fs_dir_entry *find_in_block(struct inode *dir, struct page *dentry_page, const struct f2fs_filename *fname, - int *max_slots) + int *max_slots, + bool use_hash) { struct f2fs_dentry_block *dentry_blk; struct f2fs_dentry_ptr d; @@ -207,7 +208,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page); make_dentry_ptr_block(dir, &d, dentry_blk); - return f2fs_find_target_dentry(&d, fname, max_slots); + return f2fs_find_target_dentry(&d, fname, max_slots, use_hash); } #if IS_ENABLED(CONFIG_UNICODE) @@ -284,7 +285,8 @@ static inline int f2fs_match_name(const struct inode *dir, } struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, - const struct f2fs_filename *fname, int *max_slots) + const struct f2fs_filename *fname, int *max_slots, + bool use_hash) { struct f2fs_dir_entry *de; unsigned long bit_pos = 0; @@ -307,7 +309,7 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, continue; } - if (de->hash_code == fname->hash) { + if (!use_hash || de->hash_code == fname->hash) { res = f2fs_match_name(d->inode, fname, d->filename[bit_pos], le16_to_cpu(de->name_len)); @@ -334,11 +336,12 @@ found: static struct f2fs_dir_entry *find_in_level(struct inode *dir, unsigned int level, const struct f2fs_filename *fname, - struct page **res_page) + struct page **res_page, + bool use_hash) { int s = GET_DENTRY_SLOTS(fname->disk_name.len); unsigned int nbucket, nblock; - unsigned int bidx, end_block; + unsigned int bidx, end_block, bucket_no; struct page *dentry_page; struct f2fs_dir_entry *de = NULL; pgoff_t next_pgofs; @@ -348,8 +351,11 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level); nblock = bucket_blocks(level); + bucket_no = 
use_hash ? le32_to_cpu(fname->hash) % nbucket : 0; + +start_find_bucket: bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level, - le32_to_cpu(fname->hash) % nbucket); + bucket_no); end_block = bidx + nblock; while (bidx < end_block) { @@ -366,7 +372,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, } } - de = find_in_block(dir, dentry_page, fname, &max_slots); + de = find_in_block(dir, dentry_page, fname, &max_slots, use_hash); if (IS_ERR(de)) { *res_page = ERR_CAST(de); de = NULL; @@ -383,12 +389,18 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir, bidx++; } - if (!de && room && F2FS_I(dir)->chash != fname->hash) { - F2FS_I(dir)->chash = fname->hash; - F2FS_I(dir)->clevel = level; - } + if (de) + return de; - return de; + if (likely(use_hash)) { + if (room && F2FS_I(dir)->chash != fname->hash) { + F2FS_I(dir)->chash = fname->hash; + F2FS_I(dir)->clevel = level; + } + } else if (++bucket_no < nbucket) { + goto start_find_bucket; + } + return NULL; } struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, @@ -399,11 +411,15 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, struct f2fs_dir_entry *de = NULL; unsigned int max_depth; unsigned int level; + bool use_hash = true; *res_page = NULL; +#if IS_ENABLED(CONFIG_UNICODE) +start_find_entry: +#endif if (f2fs_has_inline_dentry(dir)) { - de = f2fs_find_in_inline_dir(dir, fname, res_page); + de = f2fs_find_in_inline_dir(dir, fname, res_page, use_hash); goto out; } @@ -419,11 +435,18 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir, } for (level = 0; level < max_depth; level++) { - de = find_in_level(dir, level, fname, res_page); + de = find_in_level(dir, level, fname, res_page, use_hash); if (de || IS_ERR(*res_page)) break; } + out: +#if IS_ENABLED(CONFIG_UNICODE) + if (IS_CASEFOLDED(dir) && !de && use_hash) { + use_hash = false; + goto start_find_entry; + } +#endif /* This is to increase the speed of f2fs_create */ if (!de) F2FS_I(dir)->task = current; diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 19b1bfef798d..fc2667f6fe18 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -3491,7 +3491,8 @@ int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry, struct f2fs_filename *fname); void f2fs_free_filename(struct f2fs_filename *fname); struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d, - const struct f2fs_filename *fname, int *max_slots); + const struct f2fs_filename *fname, int *max_slots, + bool use_hash); int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, unsigned int start_pos, struct fscrypt_str *fstr); void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent, @@ -4104,7 +4105,8 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page); int f2fs_recover_inline_data(struct inode *inode, struct page *npage); struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, const struct f2fs_filename *fname, - struct page **res_page); + struct page **res_page, + bool use_hash); int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent, struct page *ipage); int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 354a6c675423..1338f8d5edf7 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -876,7 +876,11 @@ static bool f2fs_force_buffered_io(struct inode *inode, int rw) return true; if (f2fs_compressed_file(inode)) return true; - if (f2fs_has_inline_data(inode)) + /* + * only force direct read to use 
buffered IO, for direct write, + * it expects inline data conversion before committing IO. + */ + if (f2fs_has_inline_data(inode) && rw == READ) return true; /* disallow direct IO if any of devices has unaligned blksize */ @@ -1065,6 +1069,13 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, return err; } + /* + * wait for inflight dio, blocks should be removed after + * IO completion. + */ + if (attr->ia_size < old_size) + inode_dio_wait(inode); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); filemap_invalidate_lock(inode->i_mapping); @@ -1897,6 +1908,12 @@ static long f2fs_fallocate(struct file *file, int mode, if (ret) goto out; + /* + * wait for inflight dio, blocks should be removed after IO + * completion. + */ + inode_dio_wait(inode); + if (mode & FALLOC_FL_PUNCH_HOLE) { if (offset >= inode->i_size) goto out; @@ -3724,7 +3741,7 @@ static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count, to_reserved = cluster_size - compr_blocks - reserved; /* for the case all blocks in cluster were reserved */ - if (to_reserved == 1) { + if (reserved && to_reserved == 1) { dn->ofs_in_node += cluster_size; goto next; } diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 5a661a0e7663..687b2ce82c85 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -227,6 +227,8 @@ static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type) switch (sbi->gc_mode) { case GC_IDLE_CB: + case GC_URGENT_LOW: + case GC_URGENT_MID: gc_mode = GC_CB; break; case GC_IDLE_GREEDY: diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 7a2fb9789e5e..33f54e5fd780 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -336,7 +336,8 @@ process_inline: struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, const struct f2fs_filename *fname, - struct page **res_page) + struct page **res_page, + bool use_hash) { struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb); struct f2fs_dir_entry *de; @@ -353,7 +354,7 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, inline_dentry = inline_data_addr(dir, ipage); make_dentry_ptr_inline(dir, &d, inline_dentry); - de = f2fs_find_target_dentry(&d, fname, NULL); + de = f2fs_find_target_dentry(&d, fname, NULL, use_hash); unlock_page(ipage); if (IS_ERR(de)) { *res_page = ERR_CAST(de); diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 5b672df194a9..b8296b0414fc 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -695,8 +695,12 @@ retry: if (err == -ENOENT) return; + if (err == -EFSCORRUPTED) + goto stop_checkpoint; + if (err == -ENOMEM || ++count <= DEFAULT_RETRY_IO_COUNT) goto retry; +stop_checkpoint: f2fs_stop_checkpoint(sbi, false, STOP_CP_REASON_UPDATE_INODE); return; } @@ -719,8 +723,10 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc) !is_inode_flag_set(inode, FI_DIRTY_INODE)) return 0; - if (!f2fs_is_checkpoint_ready(sbi)) + if (!f2fs_is_checkpoint_ready(sbi)) { + f2fs_mark_inode_dirty_sync(inode, true); return -ENOSPC; + } /* * We need to balance fs here to prevent from producing dirty node pages diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 745ecf5523c9..ccc72781e0c6 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -1112,7 +1112,14 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from) trace_f2fs_truncate_inode_blocks_enter(inode, from); level = get_node_path(inode, from, offset, noffset); - if (level < 0) { + if (level <= 0) { + if (!level) { + level = -EFSCORRUPTED; + f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u", + __func__, inode->i_ino, 
+ from, ADDRS_PER_INODE(inode)); + set_sbi_flag(sbi, SBI_NEED_FSCK); + } trace_f2fs_truncate_inode_blocks_exit(inode, level); return level; } diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 947849e66b0a..72bbdb29e838 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -2666,7 +2666,7 @@ bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno) * This function always allocates a used segment(from dirty seglist) by SSR * manner, so it should recover the existing segment information of valid blocks */ -static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush) +static void change_curseg(struct f2fs_sb_info *sbi, int type) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, type); @@ -2674,9 +2674,8 @@ static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush) struct f2fs_summary_block *sum_node; struct page *sum_page; - if (flush) - write_sum_page(sbi, curseg->sum_blk, - GET_SUM_BLOCK(sbi, curseg->segno)); + if (curseg->inited) + write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno)); __set_test_and_inuse(sbi, new_segno); @@ -2715,7 +2714,7 @@ static void get_atssr_segment(struct f2fs_sb_info *sbi, int type, struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno); curseg->seg_type = se->type; - change_curseg(sbi, type, true); + change_curseg(sbi, type); } else { /* allocate cold segment by default */ curseg->seg_type = CURSEG_COLD_DATA; @@ -2859,31 +2858,20 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type, return 0; } -/* - * flush out current segment and replace it with new segment - * This function should be returned with success, otherwise BUG - */ -static void allocate_segment_by_default(struct f2fs_sb_info *sbi, - int type, bool force) +static bool need_new_seg(struct f2fs_sb_info *sbi, int type) { struct curseg_info *curseg = CURSEG_I(sbi, type); - if (force) - new_curseg(sbi, type, true); - else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) && - curseg->seg_type == CURSEG_WARM_NODE) - new_curseg(sbi, type, false); - else if (curseg->alloc_type == LFS && - is_next_segment_free(sbi, curseg, type) && - likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED))) - new_curseg(sbi, type, false); - else if (f2fs_need_SSR(sbi) && - get_ssr_segment(sbi, type, SSR, 0)) - change_curseg(sbi, type, true); - else - new_curseg(sbi, type, false); - - stat_inc_seg_type(sbi, curseg); + if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) && + curseg->seg_type == CURSEG_WARM_NODE) + return true; + if (curseg->alloc_type == LFS && + is_next_segment_free(sbi, curseg, type) && + likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED))) + return true; + if (!f2fs_need_SSR(sbi) || !get_ssr_segment(sbi, type, SSR, 0)) + return true; + return false; } void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, @@ -2901,7 +2889,7 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, goto unlock; if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0)) - change_curseg(sbi, type, true); + change_curseg(sbi, type); else new_curseg(sbi, type, true); @@ -2936,7 +2924,8 @@ static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type, return; alloc: old_segno = curseg->segno; - SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true); + new_curseg(sbi, type, true); + stat_inc_seg_type(sbi, curseg); locate_dirty_segment(sbi, old_segno); } @@ -2967,10 +2956,6 @@ void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) f2fs_up_read(&SM_I(sbi)->curseg_lock); } 
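For readers skimming the f2fs/segment.c hunks above: allocate_segment_by_default() is folded into a need_new_seg() predicate plus the existing new_curseg()/change_curseg() helpers, so the caller either opens a fresh segment (LFS) or re-points the current segment at a dirty one (SSR). Below is a stand-alone sketch of that decision only, not the applied kernel code; the boolean parameters stand in for the kernel's CRC-recovery, free-next-segment and SSR-candidate checks and are illustrative names.

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-alone model of the need_new_seg() control flow: returns true when a
 * brand-new segment must be opened (new_curseg()), false when a dirty segment
 * can be reused (change_curseg()).
 */
static bool need_new_seg_model(bool warm_node_without_crc_recovery,
                               bool next_segment_free_and_cp_enabled,
                               bool ssr_candidate_found)
{
        if (warm_node_without_crc_recovery)
                return true;
        if (next_segment_free_and_cp_enabled)
                return true;
        if (!ssr_candidate_found)
                return true;
        return false;
}

int main(void)
{
        /* An SSR candidate exists and no special condition applies: reuse it. */
        puts(need_new_seg_model(false, false, true) ?
             "new_curseg()" : "change_curseg()");
        return 0;
}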
-static const struct segment_allocation default_salloc_ops = { - .allocate_segment = allocate_segment_by_default, -}; - bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc) { @@ -3291,11 +3276,19 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, update_sit_entry(sbi, old_blkaddr, -1); if (!__has_curseg_space(sbi, curseg)) { - if (from_gc) + /* + * Flush out current segment and replace it with new segment. + */ + if (from_gc) { get_atssr_segment(sbi, type, se->type, AT_SSR, se->mtime); - else - sit_i->s_ops->allocate_segment(sbi, type, false); + } else { + if (need_new_seg(sbi, type)) + new_curseg(sbi, type, false); + else + change_curseg(sbi, type); + stat_inc_seg_type(sbi, curseg); + } } /* * segment dirty status should be updated after segment allocation, @@ -3543,8 +3536,8 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, } } - f2fs_bug_on(sbi, !IS_DATASEG(type)); curseg = CURSEG_I(sbi, type); + f2fs_bug_on(sbi, !IS_DATASEG(curseg->seg_type)); mutex_lock(&curseg->curseg_mutex); down_write(&sit_i->sentry_lock); @@ -3556,7 +3549,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, /* change the current segment */ if (segno != curseg->segno) { curseg->next_segno = segno; - change_curseg(sbi, type, true); + change_curseg(sbi, type); } curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); @@ -3584,7 +3577,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, if (recover_curseg) { if (old_cursegno != curseg->segno) { curseg->next_segno = old_cursegno; - change_curseg(sbi, type, true); + change_curseg(sbi, type); } curseg->next_blkoff = old_blkoff; curseg->alloc_type = old_alloc_type; @@ -4281,9 +4274,6 @@ static int build_sit_info(struct f2fs_sb_info *sbi) return -ENOMEM; #endif - /* init SIT information */ - sit_i->s_ops = &default_salloc_ops; - sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr); sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg; sit_i->written_valid_blocks = 0; diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 17d1723d98a0..5ef5a88f47a0 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -225,10 +225,6 @@ struct sec_entry { unsigned int valid_blocks; /* # of valid blocks in a section */ }; -struct segment_allocation { - void (*allocate_segment)(struct f2fs_sb_info *, int, bool); -}; - #define MAX_SKIP_GC_COUNT 16 struct revoke_entry { @@ -238,8 +234,6 @@ struct revoke_entry { }; struct sit_info { - const struct segment_allocation *s_ops; - block_t sit_base_addr; /* start block address of SIT area */ block_t sit_blocks; /* # of blocks used by SIT area */ block_t written_valid_blocks; /* # of valid blocks in main area */ @@ -584,18 +578,21 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi) } static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi, - unsigned int node_blocks, unsigned int dent_blocks) + unsigned int node_blocks, unsigned int data_blocks, + unsigned int dent_blocks) { - unsigned segno, left_blocks; + unsigned int segno, left_blocks, blocks; int i; - /* check current node sections in the worst case. */ - for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) { + /* check current data/node sections in the worst case. */ + for (i = CURSEG_HOT_DATA; i < NR_PERSISTENT_LOG; i++) { segno = CURSEG_I(sbi, i)->segno; left_blocks = CAP_BLKS_PER_SEC(sbi) - get_ckpt_valid_blocks(sbi, segno, true); - if (node_blocks > left_blocks) + + blocks = i <= CURSEG_COLD_DATA ? 
data_blocks : node_blocks; + if (blocks > left_blocks) return false; } @@ -609,8 +606,9 @@ static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi, } /* - * calculate needed sections for dirty node/dentry - * and call has_curseg_enough_space + * calculate needed sections for dirty node/dentry and call + * has_curseg_enough_space, please note that, it needs to account + * dirty data as well in lfs mode when checkpoint is disabled. */ static inline void __get_secs_required(struct f2fs_sb_info *sbi, unsigned int *lower_p, unsigned int *upper_p, bool *curseg_p) @@ -619,19 +617,30 @@ static inline void __get_secs_required(struct f2fs_sb_info *sbi, get_pages(sbi, F2FS_DIRTY_DENTS) + get_pages(sbi, F2FS_DIRTY_IMETA); unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS); + unsigned int total_data_blocks = 0; unsigned int node_secs = total_node_blocks / CAP_BLKS_PER_SEC(sbi); unsigned int dent_secs = total_dent_blocks / CAP_BLKS_PER_SEC(sbi); + unsigned int data_secs = 0; unsigned int node_blocks = total_node_blocks % CAP_BLKS_PER_SEC(sbi); unsigned int dent_blocks = total_dent_blocks % CAP_BLKS_PER_SEC(sbi); + unsigned int data_blocks = 0; + + if (f2fs_lfs_mode(sbi) && + unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { + total_data_blocks = get_pages(sbi, F2FS_DIRTY_DATA); + data_secs = total_data_blocks / CAP_BLKS_PER_SEC(sbi); + data_blocks = total_data_blocks % CAP_BLKS_PER_SEC(sbi); + } if (lower_p) - *lower_p = node_secs + dent_secs; + *lower_p = node_secs + dent_secs + data_secs; if (upper_p) *upper_p = node_secs + dent_secs + - (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0); + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0) + + (data_blocks ? 1 : 0); if (curseg_p) *curseg_p = has_curseg_enough_space(sbi, - node_blocks, dent_blocks); + node_blocks, data_blocks, dent_blocks); } static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, diff --git a/fs/file.c b/fs/file.c index 48f0b28da524..2eccbb5dcd86 100644 --- a/fs/file.c +++ b/fs/file.c @@ -21,6 +21,7 @@ #include #include #include +#include #include "internal.h" @@ -361,17 +362,25 @@ struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_ho old_fds = old_fdt->fd; new_fds = new_fdt->fd; + /* + * We may be racing against fd allocation from other threads using this + * files_struct, despite holding ->file_lock. + * + * alloc_fd() might have already claimed a slot, while fd_install() + * did not populate it yet. Note the latter operates locklessly, so + * the file can show up as we are walking the array below. + * + * At the same time we know no files will disappear as all other + * operations take the lock. + * + * Instead of trying to placate userspace racing with itself, we + * ref the file if we see it and mark the fd slot as unused otherwise. + */ for (i = open_files; i != 0; i--) { - struct file *f = *old_fds++; + struct file *f = rcu_dereference_raw(*old_fds++); if (f) { get_file(f); } else { - /* - * The fd may be claimed in the fd bitmap but not yet - * instantiated in the files array if a sibling thread - * is partway through open(). So make sure that this - * fd is available to the new process. 
- */ __clear_open_fd(open_files - i, new_fdt); } rcu_assign_pointer(*new_fds++, f); @@ -624,7 +633,7 @@ static struct file *pick_file(struct files_struct *files, unsigned fd) return NULL; fd = array_index_nospec(fd, fdt->max_fds); - file = fdt->fd[fd]; + file = rcu_dereference_raw(fdt->fd[fd]); if (file) { rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); @@ -1092,7 +1101,7 @@ __releases(&files->file_lock) */ fdt = files_fdtable(files); fd = array_index_nospec(fd, fdt->max_fds); - tofree = fdt->fd[fd]; + tofree = rcu_dereference_raw(fdt->fd[fd]); if (!tofree && fd_is_open(fd, fdt)) goto Ebusy; get_file(file); diff --git a/fs/file_table.c b/fs/file_table.c index dd88701e54a9..cecc866871bc 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -110,7 +110,7 @@ static struct ctl_table fs_stat_sysctls[] = { .data = &sysctl_nr_open, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = proc_dointvec_minmax, + .proc_handler = proc_douintvec_minmax, .extra1 = &sysctl_nr_open_min, .extra2 = &sysctl_nr_open_max, }, diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c index cb75c07b5281..ced14ac78cc1 100644 --- a/fs/fscache/volume.c +++ b/fs/fscache/volume.c @@ -322,8 +322,7 @@ maybe_wait: } return; no_wait: - clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags); - wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING); + clear_and_wake_up_bit(FSCACHE_VOLUME_CREATING, &volume->flags); } /* diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c index 6e71904c396f..dc28c28654d9 100644 --- a/fs/fuse/dax.c +++ b/fs/fuse/dax.c @@ -681,7 +681,6 @@ static int __fuse_dax_break_layouts(struct inode *inode, bool *retry, 0, 0, fuse_wait_dax_page(inode)); } -/* dmap_end == 0 leads to unmapping of whole file */ int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end) { diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index aa2be4c1ea8f..0dbacdd7bb0d 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1068,6 +1068,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir, else if (err == -EINTR) fuse_invalidate_attr(inode); + if (err == -ENOSYS) + err = -EPERM; return err; } @@ -1445,7 +1447,7 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, goto out_err; if (fc->cache_symlinks) - return page_get_link(dentry, inode, callback); + return page_get_link_raw(dentry, inode, callback); err = -ECHILD; if (!dentry) @@ -1712,7 +1714,7 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr, if (FUSE_IS_DAX(inode) && is_truncate) { filemap_invalidate_lock(mapping); fault_blocked = true; - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) { filemap_invalidate_unlock(mapping); return err; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 0df1311afb87..723dd9b94e56 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -240,7 +240,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) if (dax_truncate) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) goto out_inode_unlock; } @@ -3020,7 +3020,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, inode_lock(inode); if (block_faults) { filemap_invalidate_lock(inode->i_mapping); - err = fuse_dax_break_layouts(inode, 0, 0); + err = fuse_dax_break_layouts(inode, 0, -1); if (err) goto out; } diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 92d41269f1d3..9e4f2ba0ef9d 100644 --- a/fs/fuse/virtio_fs.c 
+++ b/fs/fuse/virtio_fs.c @@ -1429,6 +1429,9 @@ static int virtio_fs_get_tree(struct fs_context *fsc) unsigned int virtqueue_size; int err = -EIO; + if (!fsc->source) + return invalf(fsc, "No source specified"); + /* This gets a reference on virtio_fs object. This ptr gets installed * in fc->iq->priv. Once fuse_conn is going away, it calls ->put() * to drop the reference to this object. diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index c367f1678d5d..8b564c685871 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -250,6 +250,7 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask) error = filemap_fdatawait(inode->i_mapping); if (error) goto out; + truncate_inode_pages(inode->i_mapping, 0); if (new_flags & GFS2_DIF_JDATA) gfs2_ordered_del_inode(ip); } diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 6ba8460f5331..428c1db295fa 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -885,11 +885,12 @@ static void run_queue(struct gfs2_glock *gl, const int nonblock) __releases(&gl->gl_lockref.lock) __acquires(&gl->gl_lockref.lock) { - struct gfs2_holder *gh = NULL; + struct gfs2_holder *gh; if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) return; + /* While a demote is in progress, the GLF_LOCK flag must be set. */ GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)); if (test_bit(GLF_DEMOTE, &gl->gl_flags) && @@ -901,18 +902,22 @@ __acquires(&gl->gl_lockref.lock) set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); gl->gl_target = gl->gl_demote_state; + do_xmote(gl, NULL, gl->gl_target); + return; } else { if (test_bit(GLF_DEMOTE, &gl->gl_flags)) gfs2_demote_wake(gl); if (do_promote(gl) == 0) goto out_unlock; gh = find_first_waiter(gl); + if (!gh) + goto out_unlock; gl->gl_target = gh->gh_state; if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) do_error(gl, 0); /* Fail queued try locks */ + do_xmote(gl, gh, gl->gl_target); + return; } - do_xmote(gl, gh, gl->gl_target); - return; out_sched: clear_bit(GLF_LOCK, &gl->gl_flags); diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index 6add6ebfef89..cb823a8a6ba9 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -67,6 +67,12 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off) else key_len = tree->max_key_len + 1; + if (key_len > sizeof(hfs_btree_key) || key_len < 1) { + memset(key, 0, sizeof(hfs_btree_key)); + pr_err("hfs: Invalid key length: %d\n", key_len); + return; + } + hfs_bnode_read(node, key, off, key_len); } diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 6764afa98a6f..431bdc65f723 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c @@ -418,11 +418,13 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent) goto bail_no_root; res = hfs_cat_find_brec(sb, HFS_ROOT_CNID, &fd); if (!res) { - if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) { + if (fd.entrylength != sizeof(rec.dir)) { res = -EIO; goto bail_hfs_find; } hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength); + if (rec.type != HFS_CDR_DIR) + res = -EIO; } if (res) goto bail_hfs_find; diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c index 87974d5e6791..079ea80534f7 100644 --- a/fs/hfsplus/bnode.c +++ b/fs/hfsplus/bnode.c @@ -67,6 +67,12 @@ void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off) else key_len = tree->max_key_len + 2; + if (key_len > sizeof(hfsplus_btree_key) || key_len < 1) { + memset(key, 0, sizeof(hfsplus_btree_key)); + pr_err("hfsplus: Invalid key length: %d\n", key_len); + return; + } + 
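The same key-length guard is added to both B-tree key readers, hfs_bnode_read_key() in fs/hfs/bnode.c above and its fs/hfsplus/bnode.c counterpart here: lengths outside the bounds of the on-disk key are rejected before any copy. As a stand-alone sketch of the pattern (read_key_checked() and MAX_KEY_LEN are illustrative names, not kernel symbols; MAX_KEY_LEN stands in for sizeof(hfs_btree_key) or sizeof(hfsplus_btree_key)):

#include <stdio.h>
#include <string.h>

#define MAX_KEY_LEN 32  /* stand-in for the on-disk key structure size */

/* Validate the length field before touching the destination buffer. */
static int read_key_checked(void *key, const void *src, int key_len)
{
        if (key_len > MAX_KEY_LEN || key_len < 1) {
                memset(key, 0, MAX_KEY_LEN);
                fprintf(stderr, "Invalid key length: %d\n", key_len);
                return -1;
        }
        memcpy(key, src, key_len);
        return 0;
}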
hfs_bnode_read(node, key, off, key_len); } diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 7db213cd1eea..3227436f3a4a 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -156,6 +156,7 @@ struct hfsplus_sb_info { /* Runtime variables */ u32 blockoffset; + u32 min_io_size; sector_t part_start; sector_t sect_count; int fs_shift; @@ -306,7 +307,7 @@ struct hfsplus_readdir_data { */ static inline unsigned short hfsplus_min_io_size(struct super_block *sb) { - return max_t(unsigned short, bdev_logical_block_size(sb->s_bdev), + return max_t(unsigned short, HFSPLUS_SB(sb)->min_io_size, HFSPLUS_SECTOR_SIZE); } diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 0b791adf02e5..a51a58db3fef 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -171,6 +171,8 @@ int hfsplus_read_wrapper(struct super_block *sb) if (!blocksize) goto out; + sbi->min_io_size = blocksize; + if (hfsplus_get_last_session(sb, &part_start, &part_size)) goto out; diff --git a/fs/inode.c b/fs/inode.c index 5dd0f81e9721..0036d5c68fd4 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -593,6 +593,7 @@ void dump_mapping(const struct address_space *mapping) struct hlist_node *dentry_first; struct dentry *dentry_ptr; struct dentry dentry; + char fname[64] = {}; unsigned long ino; /* @@ -628,11 +629,14 @@ void dump_mapping(const struct address_space *mapping) return; } + if (strncpy_from_kernel_nofault(fname, dentry.d_name.name, 63) < 0) + strscpy(fname, "", 63); /* - * if dentry is corrupted, the %pd handler may still crash, - * but it's unlikely that we reach here with a corrupt mapping + * Even if strncpy_from_kernel_nofault() succeeded, + * the fname could be unreliable */ - pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n", a_ops, ino, &dentry); + pr_warn("aops:%ps ino:%lx dentry name(?):\"%s\"\n", + a_ops, ino, fname); } void clear_inode(struct inode *inode) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 47f44b02c17d..70e246f7e8fe 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -907,7 +907,7 @@ static int iomap_write_delalloc_scan(struct inode *inode, } /* move offset to start of next folio in range */ - start_byte = folio_next_index(folio) << PAGE_SHIFT; + start_byte = folio_pos(folio) + folio_size(folio); folio_unlock(folio); folio_put(folio); } diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index eb2f8273e6f1..09df40b612fb 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -147,7 +147,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *file, de = tmpde; } /* Basic sanity check, whether name doesn't exceed dir entry */ - if (de_len < de->name_len[0] + + if (de_len < sizeof(struct iso_directory_record) || + de_len < de->name_len[0] + sizeof(struct iso_directory_record)) { printk(KERN_NOTICE "iso9660: Corrupted directory entry" " in block %lu of inode %lu\n", block, diff --git a/fs/isofs/export.c b/fs/isofs/export.c index 35768a63fb1d..421d247fae52 100644 --- a/fs/isofs/export.c +++ b/fs/isofs/export.c @@ -180,7 +180,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb, return NULL; return isofs_export_iget(sb, - fh_len > 2 ? ifid->parent_block : 0, + fh_len > 3 ? ifid->parent_block : 0, ifid->parent_offset, fh_len > 4 ? 
ifid->parent_generation : 0); } diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 7b34deec8b87..6d02dcad8ffd 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -811,9 +811,9 @@ start_journal_io: /* * If the journal is not located on the file system device, * then we must flush the file system device before we issue - * the commit record + * the commit record and update the journal tail sequence. */ - if (commit_transaction->t_need_data_flush && + if ((commit_transaction->t_need_data_flush || update_tail) && (journal->j_fs_dev != journal->j_dev) && (journal->j_flags & JBD2_BARRIER)) blkdev_issue_flush(journal->j_fs_dev); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 55ada2b88146..41ab2dfd1ac2 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1711,7 +1711,6 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid, /* Log is no longer empty */ write_lock(&journal->j_state_lock); - WARN_ON(!sb->s_sequence); journal->j_flags &= ~JBD2_FLUSHED; write_unlock(&journal->j_state_lock); diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index 4556e4689024..ce63d5fde9c3 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c @@ -654,7 +654,7 @@ static void flush_descriptor(journal_t *journal, set_buffer_jwrite(descriptor); BUFFER_TRACE(descriptor, "write"); set_buffer_dirty(descriptor); - write_dirty_buffer(descriptor, REQ_SYNC); + write_dirty_buffer(descriptor, JBD2_JOURNAL_REQ_FLAGS); } #endif diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c index 79e771ab624f..3bd9d2f3bece 100644 --- a/fs/jffs2/compr_rtime.c +++ b/fs/jffs2/compr_rtime.c @@ -95,6 +95,9 @@ static int jffs2_rtime_decompress(unsigned char *data_in, positions[value]=outpos; if (repeat) { + if ((outpos + repeat) > destlen) { + return 1; + } if (backoffs + repeat >= outpos) { while(repeat) { cpage_out[outpos++] = cpage_out[backoffs++]; diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index acd32f05b519..ef3a1e1b6cb0 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c @@ -338,10 +338,9 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl } while(--retlen); mtd_unpoint(c->mtd, jeb->offset, c->sector_size); if (retlen) { - pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08tx\n", - *wordebuf, - jeb->offset + - c->sector_size-retlen * sizeof(*wordebuf)); + *bad_offset = jeb->offset + c->sector_size - retlen * sizeof(*wordebuf); + pr_warn("Newly-erased block contained word 0x%lx at offset 0x%08x\n", + *wordebuf, *bad_offset); return -EIO; } return 0; diff --git a/fs/jfs/jfs_dinode.h b/fs/jfs/jfs_dinode.h index 6b231d0d0071..603aae17a693 100644 --- a/fs/jfs/jfs_dinode.h +++ b/fs/jfs/jfs_dinode.h @@ -96,7 +96,7 @@ struct dinode { #define di_gengen u._file._u1._imap._gengen union { - xtpage_t _xtroot; + xtroot_t _xtroot; struct { u8 unused[16]; /* 16: */ dxd_t _dxd; /* 16: */ diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c index a6b1d748df16..5e32526174e8 100644 --- a/fs/jfs/jfs_dmap.c +++ b/fs/jfs/jfs_dmap.c @@ -204,6 +204,10 @@ int dbMount(struct inode *ipbmap) bmp->db_aglevel = le32_to_cpu(dbmp_le->dn_aglevel); bmp->db_agheight = le32_to_cpu(dbmp_le->dn_agheight); bmp->db_agwidth = le32_to_cpu(dbmp_le->dn_agwidth); + if (!bmp->db_agwidth) { + err = -EINVAL; + goto err_release_metapage; + } bmp->db_agstart = le32_to_cpu(dbmp_le->dn_agstart); bmp->db_agl2size = le32_to_cpu(dbmp_le->dn_agl2size); if (bmp->db_agl2size > L2MAXL2SIZE - L2MAXAG || @@ -1820,6 +1824,9 @@ dbAllocCtl(struct bmap * bmp, s64 nblocks, int l2nb, s64 blkno, s64 * results) 
return -EIO; dp = (struct dmap *) mp->data; + if (dp->tree.budmin < 0) + return -EIO; + /* try to allocate the blocks. */ rc = dbAllocDmapLev(bmp, dp, (int) nblocks, l2nb, results); @@ -2888,6 +2895,9 @@ static void dbAdjTree(dmtree_t *tp, int leafno, int newval, bool is_ctl) /* bubble the new value up the tree as required. */ for (k = 0; k < le32_to_cpu(tp->dmt_height); k++) { + if (lp == 0) + break; + /* get the index of the first leaf of the 4 leaf * group containing the specified leaf (leafno). */ @@ -3397,7 +3407,7 @@ int dbExtendFS(struct inode *ipbmap, s64 blkno, s64 nblocks) oldl2agsize = bmp->db_agl2size; bmp->db_agl2size = l2agsize; - bmp->db_agsize = 1 << l2agsize; + bmp->db_agsize = (s64)1 << l2agsize; /* compute new number of AG */ agno = bmp->db_numag; @@ -3660,8 +3670,8 @@ void dbFinalizeBmap(struct inode *ipbmap) * system size is not a multiple of the group size). */ inactfree = (inactags && ag_rem) ? - ((inactags - 1) << bmp->db_agl2size) + ag_rem - : inactags << bmp->db_agl2size; + (((s64)inactags - 1) << bmp->db_agl2size) + ag_rem + : ((s64)inactags << bmp->db_agl2size); /* determine how many free blocks are in the active * allocation groups plus the average number of free blocks diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 5d3127ca68a4..93db6eec4465 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c @@ -117,7 +117,8 @@ do { \ if (!(RC)) { \ if (((P)->header.nextindex > \ (((BN) == 0) ? DTROOTMAXSLOT : (P)->header.maxslot)) || \ - ((BN) && ((P)->header.maxslot > DTPAGEMAXSLOT))) { \ + ((BN) && (((P)->header.maxslot > DTPAGEMAXSLOT) || \ + ((P)->header.stblindex >= DTPAGEMAXSLOT)))) { \ BT_PUTPAGE(MP); \ jfs_error((IP)->i_sb, \ "DT_GETPAGE: dtree page corrupt\n"); \ @@ -2891,6 +2892,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) stbl = DT_GETSTBL(p); for (i = index; i < p->header.nextindex; i++) { + if (stbl[i] < 0 || stbl[i] > 127) { + jfs_err("JFS: Invalid stbl[%d] = %d for inode %ld, block = %lld", + i, stbl[i], (long)ip->i_ino, (long long)bn); + free_page(dirent_buf); + DT_PUTPAGE(mp); + return -EIO; + } + d = (struct ldtentry *) & p->slot[stbl[i]]; if (((long) jfs_dirent + d->namlen + 1) > @@ -3086,6 +3095,13 @@ static int dtReadFirst(struct inode *ip, struct btstack * btstack) /* get the leftmost entry */ stbl = DT_GETSTBL(p); + + if (stbl[0] < 0 || stbl[0] > 127) { + DT_PUTPAGE(mp); + jfs_error(ip->i_sb, "stbl[0] out of bound\n"); + return -EIO; + } + xd = (pxd_t *) & p->slot[stbl[0]]; /* get the child page block address */ diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index c72e97f06579..9adb29e7862c 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -102,7 +102,7 @@ int diMount(struct inode *ipimap) * allocate/initialize the in-memory inode map control structure */ /* allocate the in-memory inode map control structure. 
*/ - imap = kmalloc(sizeof(struct inomap), GFP_KERNEL); + imap = kzalloc(sizeof(struct inomap), GFP_KERNEL); if (imap == NULL) return -ENOMEM; @@ -456,7 +456,7 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary) dp += inum % 8; /* 8 inodes per 4K page */ /* copy on-disk inode to in-memory inode */ - if ((copy_from_dinode(dp, ip)) != 0) { + if ((copy_from_dinode(dp, ip) != 0) || (ip->i_nlink == 0)) { /* handle bad return by returning NULL for ip */ set_nlink(ip, 1); /* Don't want iput() deleting it */ iput(ip); @@ -673,7 +673,7 @@ int diWrite(tid_t tid, struct inode *ip) * This is the special xtree inside the directory for storing * the directory table */ - xtpage_t *p, *xp; + xtroot_t *p, *xp; xad_t *xad; jfs_ip->xtlid = 0; @@ -687,7 +687,7 @@ int diWrite(tid_t tid, struct inode *ip) * copy xtree root from inode to dinode: */ p = &jfs_ip->i_xtroot; - xp = (xtpage_t *) &dp->di_dirtable; + xp = (xtroot_t *) &dp->di_dirtable; lv = ilinelock->lv; for (n = 0; n < ilinelock->index; n++, lv++) { memcpy(&xp->xad[lv->offset], &p->xad[lv->offset], @@ -716,7 +716,7 @@ int diWrite(tid_t tid, struct inode *ip) * regular file: 16 byte (XAD slot) granularity */ if (type & tlckXTREE) { - xtpage_t *p, *xp; + xtroot_t *p, *xp; xad_t *xad; /* diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h index 721def69e732..dd4264aa9bed 100644 --- a/fs/jfs/jfs_incore.h +++ b/fs/jfs/jfs_incore.h @@ -66,7 +66,7 @@ struct jfs_inode_info { lid_t xtlid; /* lid of xtree lock on directory */ union { struct { - xtpage_t _xtroot; /* 288: xtree root */ + xtroot_t _xtroot; /* 288: xtree root */ struct inomap *_imap; /* 4: inode map header */ } file; struct { diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index ce4b4760fcb1..dccc8b3f1045 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c @@ -783,7 +783,7 @@ struct tlock *txLock(tid_t tid, struct inode *ip, struct metapage * mp, if (mp->xflag & COMMIT_PAGE) p = (xtpage_t *) mp->data; else - p = &jfs_ip->i_xtroot; + p = (xtpage_t *) &jfs_ip->i_xtroot; xtlck->lwm.offset = le16_to_cpu(p->header.nextindex); } @@ -1676,7 +1676,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, if (tlck->type & tlckBTROOT) { lrd->log.redopage.type |= cpu_to_le16(LOG_BTROOT); - p = &JFS_IP(ip)->i_xtroot; + p = (xtpage_t *) &JFS_IP(ip)->i_xtroot; if (S_ISDIR(ip->i_mode)) lrd->log.redopage.type |= cpu_to_le16(LOG_DIR_XTREE); diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c index 2d304cee884c..5ee618d17e77 100644 --- a/fs/jfs/jfs_xtree.c +++ b/fs/jfs/jfs_xtree.c @@ -1213,7 +1213,7 @@ xtSplitRoot(tid_t tid, struct xtlock *xtlck; int rc; - sp = &JFS_IP(ip)->i_xtroot; + sp = (xtpage_t *) &JFS_IP(ip)->i_xtroot; INCREMENT(xtStat.split); @@ -2098,7 +2098,7 @@ int xtAppend(tid_t tid, /* transaction id */ */ void xtInitRoot(tid_t tid, struct inode *ip) { - xtpage_t *p; + xtroot_t *p; /* * acquire a transaction lock on the root diff --git a/fs/jfs/jfs_xtree.h b/fs/jfs/jfs_xtree.h index 142caafc73b1..15da4e16d8b2 100644 --- a/fs/jfs/jfs_xtree.h +++ b/fs/jfs/jfs_xtree.h @@ -65,24 +65,33 @@ struct xadlist { #define XTPAGEMAXSLOT 256 #define XTENTRYSTART 2 +struct xtheader { + __le64 next; /* 8: */ + __le64 prev; /* 8: */ + + u8 flag; /* 1: */ + u8 rsrvd1; /* 1: */ + __le16 nextindex; /* 2: next index = number of entries */ + __le16 maxentry; /* 2: max number of entries */ + __le16 rsrvd2; /* 2: */ + + pxd_t self; /* 8: self */ +}; + +/* + * xtree root (in inode): + */ +typedef union { + struct xtheader header; + xad_t 
xad[XTROOTMAXSLOT]; /* 16 * maxentry: xad array */ +} xtroot_t; + /* * xtree page: */ typedef union { - struct xtheader { - __le64 next; /* 8: */ - __le64 prev; /* 8: */ - - u8 flag; /* 1: */ - u8 rsrvd1; /* 1: */ - __le16 nextindex; /* 2: next index = number of entries */ - __le16 maxentry; /* 2: max number of entries */ - __le16 rsrvd2; /* 2: */ - - pxd_t self; /* 8: self */ - } header; /* (32) */ - - xad_t xad[XTROOTMAXSLOT]; /* 16 * maxentry: xad array */ + struct xtheader header; + xad_t xad[XTPAGEMAXSLOT]; /* 16 * maxentry: xad array */ } xtpage_t; /* diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index 76b89718fd52..e6f2c619b30a 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -559,11 +559,16 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size) size_check: if (EALIST_SIZE(ea_buf->xattr) != ea_size) { - int size = min_t(int, EALIST_SIZE(ea_buf->xattr), ea_size); + if (unlikely(EALIST_SIZE(ea_buf->xattr) > INT_MAX)) { + printk(KERN_ERR "ea_get: extended attribute size too large: %u > INT_MAX\n", + EALIST_SIZE(ea_buf->xattr)); + } else { + int size = clamp_t(int, ea_size, 0, EALIST_SIZE(ea_buf->xattr)); - printk(KERN_ERR "ea_get: invalid extended attribute\n"); - print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, - ea_buf->xattr, size, 1); + printk(KERN_ERR "ea_get: invalid extended attribute\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, + ea_buf->xattr, size, 1); + } ea_release(inode, ea_buf); rc = -EIO; goto clean_up; diff --git a/fs/namei.c b/fs/namei.c index 166d71c82d7a..6ce07cde1c27 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -5156,10 +5156,9 @@ const char *vfs_get_link(struct dentry *dentry, struct delayed_call *done) EXPORT_SYMBOL(vfs_get_link); /* get the link contents into pagecache */ -const char *page_get_link(struct dentry *dentry, struct inode *inode, - struct delayed_call *callback) +static char *__page_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback) { - char *kaddr; struct page *page; struct address_space *mapping = inode->i_mapping; @@ -5178,8 +5177,23 @@ const char *page_get_link(struct dentry *dentry, struct inode *inode, } set_delayed_call(callback, page_put_link, page); BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM); - kaddr = page_address(page); - nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1); + return page_address(page); +} + +const char *page_get_link_raw(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback) +{ + return __page_get_link(dentry, inode, callback); +} +EXPORT_SYMBOL_GPL(page_get_link_raw); + +const char *page_get_link(struct dentry *dentry, struct inode *inode, + struct delayed_call *callback) +{ + char *kaddr = __page_get_link(dentry, inode, callback); + + if (!IS_ERR(kaddr)) + nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1); return kaddr; } diff --git a/fs/namespace.c b/fs/namespace.c index 59a9f877738b..211a81240680 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -629,15 +629,11 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq) return 0; mnt = real_mount(bastard); mnt_add_count(mnt, 1); - smp_mb(); // see mntput_no_expire() + smp_mb(); // see mntput_no_expire() and do_umount() if (likely(!read_seqretry(&mount_lock, seq))) return 0; - if (bastard->mnt_flags & MNT_SYNC_UMOUNT) { - mnt_add_count(mnt, -1); - return 1; - } lock_mount_hash(); - if (unlikely(bastard->mnt_flags & MNT_DOOMED)) { + if (unlikely(bastard->mnt_flags & (MNT_SYNC_UMOUNT | MNT_DOOMED))) { mnt_add_count(mnt, -1); 
unlock_mount_hash(); return 1; @@ -1707,6 +1703,7 @@ static int do_umount(struct mount *mnt, int flags) umount_tree(mnt, UMOUNT_PROPAGATE); retval = 0; } else { + smp_mb(); // paired with __legitimize_mnt() shrink_submounts(mnt); retval = -EBUSY; if (!propagate_mount_busy(mnt, 2)) { @@ -1777,6 +1774,7 @@ static void warn_mandlock(void) static int can_umount(const struct path *path, int flags) { struct mount *mnt = real_mount(path->mnt); + struct super_block *sb = path->dentry->d_sb; if (!may_mount()) return -EPERM; @@ -1786,7 +1784,7 @@ static int can_umount(const struct path *path, int flags) return -EINVAL; if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ return -EINVAL; - if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) + if (flags & MNT_FORCE && !ns_capable(sb->s_user_ns, CAP_SYS_ADMIN)) return -EPERM; return 0; } diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig index 14a72224b657..899e25e9b4eb 100644 --- a/fs/nfs/Kconfig +++ b/fs/nfs/Kconfig @@ -2,6 +2,7 @@ config NFS_FS tristate "NFS client support" depends on INET && FILE_LOCKING && MULTIUSER + select CRC32 select LOCKD select SUNRPC select NFS_ACL_SUPPORT if NFS_V3_ACL @@ -194,7 +195,6 @@ config NFS_USE_KERNEL_DNS config NFS_DEBUG bool depends on NFS_FS && SUNRPC_DEBUG - select CRC32 default y config NFS_DISABLE_UDP_SUPPORT diff --git a/fs/nfs/client.c b/fs/nfs/client.c index a8930e6c417f..de4ad41b14e2 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1052,6 +1052,8 @@ struct nfs_server *nfs_create_server(struct fs_context *fc) if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN) server->namelen = NFS2_MAXNAMLEN; } + /* Linux 'subtree_check' borkenness mandates this setting */ + server->fh_expire_type = NFS_FH_VOL_RENAME; if (!(fattr->valid & NFS_ATTR_FATTR)) { error = ctx->nfs_mod->rpc_ops->getattr(server, ctx->mntfh, diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 39c697e100b1..17b38da17288 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -297,7 +297,8 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi) if (delegation == NULL) goto out; spin_lock(&delegation->lock); - if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { + if (delegation->inode && + !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) { clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags); /* Refcount matched in nfs_end_delegation_return() */ ret = nfs_get_delegation(delegation); @@ -570,17 +571,6 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation) if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags)) ret = true; - else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) { - struct inode *inode; - - spin_lock(&delegation->lock); - inode = delegation->inode; - if (inode && list_empty(&NFS_I(inode)->open_files)) - ret = true; - spin_unlock(&delegation->lock); - } - if (ret) - clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) || test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) || test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) @@ -821,11 +811,25 @@ int nfs4_inode_make_writeable(struct inode *inode) return nfs4_inode_return_delegation(inode); } -static void nfs_mark_return_if_closed_delegation(struct nfs_server *server, - struct nfs_delegation *delegation) +static void +nfs_mark_return_if_closed_delegation(struct nfs_server *server, + struct nfs_delegation *delegation) { - set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, 
&delegation->flags); - set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state); + struct inode *inode; + + if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) || + test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) + return; + spin_lock(&delegation->lock); + inode = delegation->inode; + if (!inode) + goto out; + if (list_empty(&NFS_I(inode)->open_files)) + nfs_mark_return_delegation(server, delegation); + else + set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags); +out: + spin_unlock(&delegation->lock); } static bool nfs_server_mark_return_all_delegations(struct nfs_server *server) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 70660ff248b7..1876978107ca 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -2632,6 +2632,18 @@ nfs_unblock_rename(struct rpc_task *task, struct nfs_renamedata *data) unblock_revalidate(new_dentry); } +static bool nfs_rename_is_unsafe_cross_dir(struct dentry *old_dentry, + struct dentry *new_dentry) +{ + struct nfs_server *server = NFS_SB(old_dentry->d_sb); + + if (old_dentry->d_parent != new_dentry->d_parent) + return false; + if (server->fh_expire_type & NFS_FH_RENAME_UNSAFE) + return !(server->fh_expire_type & NFS_FH_NOEXPIRE_WITH_OPEN); + return true; +} + /* * RENAME * FIXME: Some nfsds, like the Linux user space nfsd, may generate a @@ -2719,7 +2731,8 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir, } - if (S_ISREG(old_inode->i_mode)) + if (S_ISREG(old_inode->i_mode) && + nfs_rename_is_unsafe_cross_dir(old_dentry, new_dentry)) nfs_sync_inode(old_inode); task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, must_unblock ? nfs_unblock_rename : NULL); diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index acf4b88889dc..d5f1fbfd9a0c 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c @@ -75,6 +75,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, struct page *scratch; struct list_head dsaddrs; struct nfs4_pnfs_ds_addr *da; + struct net *net = server->nfs_client->cl_net; /* set up xdr stream */ scratch = alloc_page(gfp_flags); @@ -158,8 +159,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, mp_count = be32_to_cpup(p); /* multipath count */ for (j = 0; j < mp_count; j++) { - da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net, - &stream, gfp_flags); + da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags); if (da) list_add_tail(&da->da_node, &dsaddrs); } @@ -169,7 +169,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, goto out_err_free_deviceid; } - dsaddr->ds_list[i] = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags); + dsaddr->ds_list[i] = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags); if (!dsaddr->ds_list[i]) goto out_err_drain_dsaddrs; diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 4376881be791..07e5ea64dcd6 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -839,6 +839,9 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs4_pnfs_ds *ds; u32 ds_idx; + if (NFS_SERVER(pgio->pg_inode)->flags & + (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) + pgio->pg_maxretrans = io_maxretrans; retry: ff_layout_pg_check_layout(pgio, req); /* Use full layout for now */ @@ -852,6 +855,8 @@ retry: if (!pgio->pg_lseg) goto out_nolseg; } + /* Reset wb_nio, since getting layout segment was successful */ + req->wb_nio = 0; ds = 
ff_layout_get_ds_for_read(pgio, &ds_idx); if (!ds) { @@ -868,14 +873,24 @@ retry: pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; pgio->pg_mirror_idx = ds_idx; - - if (NFS_SERVER(pgio->pg_inode)->flags & - (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) - pgio->pg_maxretrans = io_maxretrans; return; out_nolseg: - if (pgio->pg_error < 0) - return; + if (pgio->pg_error < 0) { + if (pgio->pg_error != -EAGAIN) + return; + /* Retry getting layout segment if lower layer returned -EAGAIN */ + if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) { + if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR) + pgio->pg_error = -ETIMEDOUT; + else + pgio->pg_error = -EIO; + return; + } + pgio->pg_error = 0; + /* Sleep for 1 second before retrying */ + ssleep(1); + goto retry; + } out_mds: trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode, 0, NFS4_MAX_UINT64, IOMODE_READ, @@ -1240,6 +1255,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, case -ECONNRESET: case -EHOSTDOWN: case -EHOSTUNREACH: + case -ENETDOWN: case -ENETUNREACH: case -EADDRINUSE: case -ENOBUFS: diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index e028f5a0ef5f..d21c5ecfbf1c 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -49,6 +49,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, struct nfs4_pnfs_ds_addr *da; struct nfs4_ff_layout_ds *new_ds = NULL; struct nfs4_ff_ds_version *ds_versions = NULL; + struct net *net = server->nfs_client->cl_net; u32 mp_count; u32 version_count; __be32 *p; @@ -80,8 +81,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, for (i = 0; i < mp_count; i++) { /* multipath ds */ - da = nfs4_decode_mp_ds_addr(server->nfs_client->cl_net, - &stream, gfp_flags); + da = nfs4_decode_mp_ds_addr(net, &stream, gfp_flags); if (da) list_add_tail(&da->da_node, &dsaddrs); } @@ -149,7 +149,7 @@ nfs4_ff_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev, new_ds->ds_versions = ds_versions; new_ds->ds_versions_cnt = version_count; - new_ds->ds = nfs4_pnfs_ds_add(&dsaddrs, gfp_flags); + new_ds->ds = nfs4_pnfs_ds_add(net, &dsaddrs, gfp_flags); if (!new_ds->ds) goto out_err_drain_dsaddrs; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 964df0725f4c..f2e66b946f4b 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -74,6 +74,8 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr) int nfs_wait_bit_killable(struct wait_bit_key *key, int mode) { + if (unlikely(nfs_current_task_exiting())) + return -EINTR; schedule(); if (signal_pending_state(mode, current)) return -ERESTARTSYS; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index d921d7b7bec6..84361674bffc 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -11,7 +11,7 @@ #include #include -#define NFS_SB_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS) +#define NFS_SB_MASK (SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS) extern const struct export_operations nfs_export_ops; @@ -826,33 +826,16 @@ u64 nfs_timespec_to_change_attr(const struct timespec64 *ts) return ((u64)ts->tv_sec << 30) + ts->tv_nsec; } -#ifdef CONFIG_CRC32 -/** - * nfs_fhandle_hash - calculate the crc32 hash for the filehandle - * @fh - pointer to filehandle - * - * returns a crc32 hash for the filehandle that is compatible with - * the one displayed by "wireshark". 
- */ -static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh) -{ - return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size); -} static inline u32 nfs_stateid_hash(const nfs4_stateid *stateid) { return ~crc32_le(0xFFFFFFFF, &stateid->other[0], NFS4_STATEID_OTHER_SIZE); } -#else -static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh) + +static inline bool nfs_current_task_exiting(void) { - return 0; + return (current->flags & PF_EXITING) != 0; } -static inline u32 nfs_stateid_hash(nfs4_stateid *stateid) -{ - return 0; -} -#endif static inline bool nfs_error_is_fatal(int err) { diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index 2e7579626cf0..f036d30f7515 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -39,7 +39,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); schedule_timeout(NFS_JUKEBOX_RETRY_TIME); res = -ERESTARTSYS; - } while (!fatal_signal_pending(current)); + } while (!fatal_signal_pending(current) && !nfs_current_task_exiting()); return res; } diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 89c32a963dd1..923ccd3b540f 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -551,7 +551,7 @@ static int nfs42_do_offload_cancel_async(struct file *dst, .rpc_message = &msg, .callback_ops = &nfs42_offload_cancel_ops, .workqueue = nfsiod_workqueue, - .flags = RPC_TASK_ASYNC, + .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE, }; int status; diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index 20aa5e746497..fd51fd1cf5ff 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -129,9 +129,11 @@ decode_putfh_maxsz + \ decode_offload_cancel_maxsz) #define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ encode_putfh_maxsz + \ encode_copy_notify_maxsz) #define NFS4_dec_copy_notify_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ decode_putfh_maxsz + \ decode_copy_notify_maxsz) #define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \ diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 1e97de7c8c20..0f28607c5747 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -422,6 +422,8 @@ static int nfs4_delay_killable(long *timeout) { might_sleep(); + if (unlikely(nfs_current_task_exiting())) + return -EINTR; __set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE); schedule_timeout(nfs4_update_delay(timeout)); if (!__fatal_signal_pending(current)) @@ -433,6 +435,8 @@ static int nfs4_delay_interruptible(long *timeout) { might_sleep(); + if (unlikely(nfs_current_task_exiting())) + return -EINTR; __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE); schedule_timeout(nfs4_update_delay(timeout)); if (!signal_pending(current)) @@ -1712,7 +1716,8 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, rcu_read_unlock(); trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0); - if (!fatal_signal_pending(current)) { + if (!fatal_signal_pending(current) && + !nfs_current_task_exiting()) { if (schedule_timeout(5*HZ) == 0) status = -EAGAIN; else @@ -2528,12 +2533,14 @@ static void nfs4_open_release(void *calldata) struct nfs4_opendata *data = calldata; struct nfs4_state *state = NULL; + /* In case of error, no cleanup! */ + if (data->rpc_status != 0 || !data->rpc_done) { + nfs_release_seqid(data->o_arg.seqid); + goto out_free; + } /* If this request hasn't been cancelled, do nothing */ if (!data->cancelled) goto out_free; - /* In case of error, no cleanup! 
*/ - if (data->rpc_status != 0 || !data->rpc_done) - goto out_free; /* In case we need an open_confirm, no cleanup! */ if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) goto out_free; @@ -3498,7 +3505,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst, write_sequnlock(&state->seqlock); trace_nfs4_close_stateid_update_wait(state->inode, dst, 0); - if (fatal_signal_pending(current)) + if (fatal_signal_pending(current) || nfs_current_task_exiting()) status = -EINTR; else if (schedule_timeout(5*HZ) != 0) @@ -6884,10 +6891,18 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, struct nfs4_unlockdata *p; struct nfs4_state *state = lsp->ls_state; struct inode *inode = state->inode; + struct nfs_lock_context *l_ctx; p = kzalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) return NULL; + l_ctx = nfs_get_lock_context(ctx); + if (!IS_ERR(l_ctx)) { + p->l_ctx = l_ctx; + } else { + kfree(p); + return NULL; + } p->arg.fh = NFS_FH(inode); p->arg.fl = &p->fl; p->arg.seqid = seqid; @@ -6895,7 +6910,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, p->lsp = lsp; /* Ensure we don't close file until we're done freeing locks! */ p->ctx = get_nfs_open_context(ctx); - p->l_ctx = nfs_get_lock_context(ctx); locks_init_lock(&p->fl); locks_copy_lock(&p->fl, fl); p->server = NFS_SERVER(inode); diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index 351616c61df5..f9c291e2165c 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h @@ -148,16 +148,12 @@ static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst, memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN); } -#ifdef CONFIG_CRC32 /* * nfs_session_id_hash - calculate the crc32 hash for the session id * @session - pointer to session */ #define nfs_session_id_hash(sess_id) \ (~crc32_le(0xFFFFFFFF, &(sess_id)->data[0], sizeof((sess_id)->data))) -#else -#define nfs_session_id_hash(session) (0) -#endif #else /* defined(CONFIG_NFS_V4_1) */ static inline int nfs4_init_session(struct nfs_client *clp) diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 48ea40660422..80a7c5bd7a47 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -2737,7 +2737,15 @@ out_error: pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s" " with error %d\n", section_sep, section, clp->cl_hostname, -status); - ssleep(1); + switch (status) { + case -ENETDOWN: + case -ENETUNREACH: + nfs_mark_client_ready(clp, -EIO); + break; + default: + ssleep(1); + break; + } out_drain: memalloc_nofs_restore(memflags); nfs4_end_drain_session(clp); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index c96d2e76156e..fe0ddbce3bcb 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -732,6 +732,14 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo, return remaining; } +static void pnfs_reset_return_info(struct pnfs_layout_hdr *lo) +{ + struct pnfs_layout_segment *lseg; + + list_for_each_entry(lseg, &lo->plh_return_segs, pls_list) + pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0); +} + static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo, struct list_head *free_me, @@ -1180,6 +1188,7 @@ void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo, pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq); pnfs_free_returned_lsegs(lo, &freeme, range, seq); pnfs_set_layout_stateid(lo, stateid, NULL, true); + pnfs_reset_return_info(lo); } else pnfs_mark_layout_stateid_invalid(lo, &freeme); out_unlock: @@ -1196,7 +1205,7 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo, enum 
pnfs_iomode *iomode) { /* Serialise LAYOUTGET/LAYOUTRETURN */ - if (atomic_read(&lo->plh_outstanding) != 0) + if (atomic_read(&lo->plh_outstanding) != 0 && lo->plh_return_seq == 0) return false; if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) return false; diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index e3e6a41f19de..f5173c188184 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -59,6 +59,7 @@ struct nfs4_pnfs_ds { struct list_head ds_node; /* nfs4_pnfs_dev_hlist dev_dslist */ char *ds_remotestr; /* comma sep list of addrs */ struct list_head ds_addrs; + const struct net *ds_net; struct nfs_client *ds_clp; refcount_t ds_count; unsigned long ds_state; @@ -405,7 +406,8 @@ int pnfs_generic_commit_pagelist(struct inode *inode, int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max); void pnfs_generic_write_commit_done(struct rpc_task *task, void *data); void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds); -struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, +struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(const struct net *net, + struct list_head *dsaddrs, gfp_t gfp_flags); void nfs4_pnfs_v3_ds_connect_unload(void); int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 47a8da3f5c9f..31afa88742f6 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -651,12 +651,12 @@ _same_data_server_addrs_locked(const struct list_head *dsaddrs1, * Lookup DS by addresses. nfs4_ds_cache_lock is held */ static struct nfs4_pnfs_ds * -_data_server_lookup_locked(const struct list_head *dsaddrs) +_data_server_lookup_locked(const struct net *net, const struct list_head *dsaddrs) { struct nfs4_pnfs_ds *ds; list_for_each_entry(ds, &nfs4_data_server_cache, ds_node) - if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs)) + if (ds->ds_net == net && _same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs)) return ds; return NULL; } @@ -763,7 +763,7 @@ out_err: * uncached and return cached struct nfs4_pnfs_ds. 
*/ struct nfs4_pnfs_ds * -nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) +nfs4_pnfs_ds_add(const struct net *net, struct list_head *dsaddrs, gfp_t gfp_flags) { struct nfs4_pnfs_ds *tmp_ds, *ds = NULL; char *remotestr; @@ -781,13 +781,14 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags) remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags); spin_lock(&nfs4_ds_cache_lock); - tmp_ds = _data_server_lookup_locked(dsaddrs); + tmp_ds = _data_server_lookup_locked(net, dsaddrs); if (tmp_ds == NULL) { INIT_LIST_HEAD(&ds->ds_addrs); list_splice_init(dsaddrs, &ds->ds_addrs); ds->ds_remotestr = remotestr; refcount_set(&ds->ds_count, 1); INIT_LIST_HEAD(&ds->ds_node); + ds->ds_net = net; ds->ds_clp = NULL; list_add(&ds->ds_node, &nfs4_data_server_cache); dprintk("%s add new data server %s\n", __func__, diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig index 7c441f2bd444..4f704f868d9c 100644 --- a/fs/nfsd/Kconfig +++ b/fs/nfsd/Kconfig @@ -4,6 +4,7 @@ config NFSD depends on INET depends on FILE_LOCKING depends on FSNOTIFY + select CRC32 select LOCKD select SUNRPC select EXPORTFS diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c index 16fadade86cc..39228bd7492a 100644 --- a/fs/nfsd/export.c +++ b/fs/nfsd/export.c @@ -1324,9 +1324,12 @@ static int e_show(struct seq_file *m, void *p) return 0; } - exp_get(exp); + if (!cache_get_rcu(&exp->h)) + return 0; + if (cache_check(cd, &exp->h, NULL)) return 0; + exp_put(exp); return svc_export_show(m, cd, cp); } diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index c9393c732889..46eb06e32f0c 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -219,6 +219,7 @@ nfsd_file_alloc(struct net *net, struct inode *inode, unsigned char need, return NULL; INIT_LIST_HEAD(&nf->nf_lru); + INIT_LIST_HEAD(&nf->nf_gc); nf->nf_birthtime = ktime_get(); nf->nf_file = NULL; nf->nf_cred = get_current_cred(); @@ -396,8 +397,8 @@ nfsd_file_dispose_list(struct list_head *dispose) struct nfsd_file *nf; while (!list_empty(dispose)) { - nf = list_first_entry(dispose, struct nfsd_file, nf_lru); - list_del_init(&nf->nf_lru); + nf = list_first_entry(dispose, struct nfsd_file, nf_gc); + list_del_init(&nf->nf_gc); nfsd_file_free(nf); } } @@ -414,12 +415,12 @@ nfsd_file_dispose_list_delayed(struct list_head *dispose) { while(!list_empty(dispose)) { struct nfsd_file *nf = list_first_entry(dispose, - struct nfsd_file, nf_lru); + struct nfsd_file, nf_gc); struct nfsd_net *nn = net_generic(nf->nf_net, nfsd_net_id); struct nfsd_fcache_disposal *l = nn->fcache_disposal; spin_lock(&l->lock); - list_move_tail(&nf->nf_lru, &l->freeme); + list_move_tail(&nf->nf_gc, &l->freeme); spin_unlock(&l->lock); queue_work(nfsd_filecache_wq, &l->work); } @@ -476,7 +477,8 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru, /* Refcount went to zero. 
Unhash it and queue it to the dispose list */ nfsd_file_unhash(nf); - list_lru_isolate_move(lru, &nf->nf_lru, head); + list_lru_isolate(lru, &nf->nf_lru); + list_add(&nf->nf_gc, head); this_cpu_inc(nfsd_file_evictions); trace_nfsd_file_gc_disposed(nf); return LRU_REMOVED; @@ -555,7 +557,7 @@ nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose) /* If refcount goes to 0, then put on the dispose list */ if (refcount_sub_and_test(decrement, &nf->nf_ref)) { - list_add(&nf->nf_lru, dispose); + list_add(&nf->nf_gc, dispose); trace_nfsd_file_closing(nf); } } @@ -631,8 +633,8 @@ nfsd_file_close_inode_sync(struct inode *inode) nfsd_file_queue_for_close(inode, &dispose); while (!list_empty(&dispose)) { - nf = list_first_entry(&dispose, struct nfsd_file, nf_lru); - list_del_init(&nf->nf_lru); + nf = list_first_entry(&dispose, struct nfsd_file, nf_gc); + list_del_init(&nf->nf_gc); nfsd_file_free(nf); } flush_delayed_fput(); diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h index e54165a3224f..bf7a630f1a45 100644 --- a/fs/nfsd/filecache.h +++ b/fs/nfsd/filecache.h @@ -44,6 +44,7 @@ struct nfsd_file { struct nfsd_file_mark *nf_mark; struct list_head nf_lru; + struct list_head nf_gc; struct rcu_head nf_rcu; ktime_t nf_birthtime; }; diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 548422b24a7d..41c750f34473 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -152,6 +152,7 @@ struct nfsd_net { u32 s2s_cp_cl_id; struct idr s2s_cp_stateids; spinlock_t s2s_cp_lock; + atomic_t pending_async_copies; /* * Version information diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c index 65d4511b7af0..6c4fe9409611 100644 --- a/fs/nfsd/nfs2acl.c +++ b/fs/nfsd/nfs2acl.c @@ -84,6 +84,8 @@ out: fail: posix_acl_release(resp->acl_access); posix_acl_release(resp->acl_default); + resp->acl_access = NULL; + resp->acl_default = NULL; goto out; } diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c index a34a22e272ad..e6bb621f1ffd 100644 --- a/fs/nfsd/nfs3acl.c +++ b/fs/nfsd/nfs3acl.c @@ -76,6 +76,8 @@ out: fail: posix_acl_release(resp->acl_access); posix_acl_release(resp->acl_default); + resp->acl_access = NULL; + resp->acl_default = NULL; goto out; } diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 4eae2c5af2ed..272d3facfff9 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -297,17 +297,17 @@ static int decode_cb_compound4res(struct xdr_stream *xdr, u32 length; __be32 *p; - p = xdr_inline_decode(xdr, 4 + 4); + p = xdr_inline_decode(xdr, XDR_UNIT); if (unlikely(p == NULL)) goto out_overflow; - hdr->status = be32_to_cpup(p++); + hdr->status = be32_to_cpup(p); /* Ignore the tag */ - length = be32_to_cpup(p++); - p = xdr_inline_decode(xdr, length + 4); - if (unlikely(p == NULL)) + if (xdr_stream_decode_u32(xdr, &length) < 0) + goto out_overflow; + if (xdr_inline_decode(xdr, length) == NULL) + goto out_overflow; + if (xdr_stream_decode_u32(xdr, &hdr->nops) < 0) goto out_overflow; - p += XDR_QUADLEN(length); - hdr->nops = be32_to_cpup(p); return 0; out_overflow: return -EIO; @@ -986,7 +986,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c args.authflavor = clp->cl_cred.cr_flavor; clp->cl_cb_ident = conn->cb_ident; } else { - if (!conn->cb_xprt) + if (!conn->cb_xprt || !ses) return -EINVAL; clp->cl_cb_session = ses; args.bc_xprt = conn->cb_xprt; @@ -1202,6 +1202,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback ret = false; break; case -NFS4ERR_DELAY: + cb->cb_seq_status = 1; if (!rpc_restart_call(task)) 
goto out; @@ -1409,8 +1410,11 @@ nfsd4_run_cb_work(struct work_struct *work) nfsd4_process_cb_update(cb); clnt = clp->cl_cb_client; - if (!clnt) { - /* Callback channel broken, or client killed; give up: */ + if (!clnt || clp->cl_state == NFSD4_COURTESY) { + /* + * Callback channel broken, client killed or + * nfs4_client in courtesy state; give up. + */ nfsd41_destroy_cb(cb); return; } diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index df9dbd93663e..6eb02390bd42 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -717,15 +717,6 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, &access->ac_supported); } -static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net) -{ - __be32 *verf = (__be32 *)verifier->data; - - BUILD_BUG_ON(2*sizeof(*verf) != sizeof(verifier->data)); - - nfsd_copy_write_verifier(verf, net_generic(net, nfsd_net_id)); -} - static __be32 nfsd4_commit(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) @@ -1252,6 +1243,7 @@ static void nfs4_put_copy(struct nfsd4_copy *copy) { if (!refcount_dec_and_test(&copy->refcount)) return; + atomic_dec(&copy->cp_nn->pending_async_copies); kfree(copy->cp_src); kfree(copy); } @@ -1263,7 +1255,7 @@ static void nfsd4_stop_copy(struct nfsd4_copy *copy) nfs4_put_copy(copy); } -static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp) +static struct nfsd4_copy *nfsd4_unhash_copy(struct nfs4_client *clp) { struct nfsd4_copy *copy = NULL; @@ -1272,6 +1264,9 @@ static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp) copy = list_first_entry(&clp->async_copies, struct nfsd4_copy, copies); refcount_inc(&copy->refcount); + copy->cp_clp = NULL; + if (!list_empty(&copy->copies)) + list_del_init(&copy->copies); } spin_unlock(&clp->async_lock); return copy; @@ -1281,7 +1276,7 @@ void nfsd4_shutdown_copy(struct nfs4_client *clp) { struct nfsd4_copy *copy; - while ((copy = nfsd4_get_copy(clp)) != NULL) + while ((copy = nfsd4_unhash_copy(clp)) != NULL) nfsd4_stop_copy(copy); } #ifdef CONFIG_NFSD_V4_2_INTER_SSC @@ -1593,7 +1588,6 @@ static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync) test_bit(NFSD4_COPY_F_COMMITTED, &copy->cp_flags) ? 
NFS_FILE_SYNC : NFS_UNSTABLE; nfsd4_copy_set_sync(copy, sync); - gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net); } static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy, @@ -1764,10 +1758,16 @@ static __be32 nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { - struct nfsd4_copy *copy = &u->copy; - __be32 status; + struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); struct nfsd4_copy *async_copy = NULL; + struct nfsd4_copy *copy = &u->copy; + struct nfsd42_write_res *result; + __be32 status; + result = &copy->cp_res; + nfsd_copy_write_verifier((__be32 *)&result->wr_verifier.data, nn); + + copy->cp_clp = cstate->clp; if (nfsd4_ssc_is_inter(copy)) { if (!inter_copy_offload_enable || nfsd4_copy_is_sync(copy)) { status = nfserr_notsupp; @@ -1782,25 +1782,26 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, return status; } - copy->cp_clp = cstate->clp; memcpy(&copy->fh, &cstate->current_fh.fh_handle, sizeof(struct knfsd_fh)); if (nfsd4_copy_is_async(copy)) { - struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); - - status = nfserrno(-ENOMEM); async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL); if (!async_copy) goto out_err; + async_copy->cp_nn = nn; INIT_LIST_HEAD(&async_copy->copies); refcount_set(&async_copy->refcount, 1); + /* Arbitrary cap on number of pending async copy operations */ + if (atomic_inc_return(&nn->pending_async_copies) > + (int)rqstp->rq_pool->sp_nrthreads) + goto out_err; async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL); if (!async_copy->cp_src) goto out_err; if (!nfs4_init_copy_state(nn, copy)) goto out_err; - memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.cs_stid, - sizeof(copy->cp_res.cb_stateid)); + memcpy(&result->cb_stateid, &copy->cp_stateid.cs_stid, + sizeof(result->cb_stateid)); dup_copy_fields(copy, async_copy); async_copy->copy_task = kthread_create(nfsd4_do_async_copy, async_copy, "%s", "copy thread"); @@ -1830,7 +1831,7 @@ out_err: } if (async_copy) cleanup_async_copy(async_copy); - status = nfserrno(-ENOMEM); + status = nfserr_jukebox; goto out; } diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 7d5d794e2e32..659698971810 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -658,7 +658,8 @@ nfs4_reset_recoverydir(char *recdir) return status; status = -ENOTDIR; if (d_is_dir(path.dentry)) { - strcpy(user_recovery_dirname, recdir); + strscpy(user_recovery_dirname, recdir, + sizeof(user_recovery_dirname)); status = 0; } path_put(&path); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 893d099e0099..bdee95d714d0 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1069,6 +1069,12 @@ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp) return openlockstateid(stid); } +/* + * As the sc_free callback of deleg, this may be called by nfs4_put_stid + * in nfsd_break_one_deleg. + * Considering nfsd_break_one_deleg is called with the flc->flc_lock held, + * this function mustn't ever sleep. 
+ */ static void nfs4_free_deleg(struct nfs4_stid *stid) { struct nfs4_delegation *dp = delegstateid(stid); @@ -1628,6 +1634,14 @@ static void release_open_stateid(struct nfs4_ol_stateid *stp) free_ol_stateid_reaplist(&reaplist); } +static bool nfs4_openowner_unhashed(struct nfs4_openowner *oo) +{ + lockdep_assert_held(&oo->oo_owner.so_client->cl_lock); + + return list_empty(&oo->oo_owner.so_strhash) && + list_empty(&oo->oo_perclient); +} + static void unhash_openowner_locked(struct nfs4_openowner *oo) { struct nfs4_client *clp = oo->oo_owner.so_client; @@ -4635,6 +4649,12 @@ retry: spin_lock(&oo->oo_owner.so_client->cl_lock); spin_lock(&fp->fi_lock); + if (nfs4_openowner_unhashed(oo)) { + mutex_unlock(&stp->st_mutex); + stp = NULL; + goto out_unlock; + } + retstp = nfsd4_find_existing_open(fp, open); if (retstp) goto out_unlock; @@ -4909,6 +4929,7 @@ static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = { static void nfsd_break_one_deleg(struct nfs4_delegation *dp) { + bool queued; /* * We're assuming the state code never drops its reference * without first removing the lease. Since we're in this lease @@ -4917,7 +4938,10 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp) * we know it's safe to take a reference. */ refcount_inc(&dp->dl_stid.sc_count); - WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall)); + queued = nfsd4_run_cb(&dp->dl_recall); + WARN_ON_ONCE(!queued); + if (!queued) + refcount_dec(&dp->dl_stid.sc_count); } /* Called from break_lease() with flc_lock held. */ @@ -5709,6 +5733,11 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf if (!stp) { stp = init_open_stateid(fp, open); + if (!stp) { + status = nfserr_jukebox; + goto out; + } + if (!open->op_stp) new_stp = true; } @@ -6218,14 +6247,19 @@ deleg_reaper(struct nfsd_net *nn) spin_lock(&nn->client_lock); list_for_each_safe(pos, next, &nn->client_lru) { clp = list_entry(pos, struct nfs4_client, cl_lru); - if (clp->cl_state != NFSD4_ACTIVE || - list_empty(&clp->cl_delegations) || - atomic_read(&clp->cl_delegs_in_recall) || - test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) || - (ktime_get_boottime_seconds() - - clp->cl_ra_time < 5)) { + + if (clp->cl_state != NFSD4_ACTIVE) + continue; + if (list_empty(&clp->cl_delegations)) + continue; + if (atomic_read(&clp->cl_delegs_in_recall)) + continue; + if (test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags)) + continue; + if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5) + continue; + if (clp->cl_cb_state != NFSD4_CB_UP) continue; - } list_add(&clp->cl_ra_cblist, &cblist); /* release in nfsd4_cb_recall_any_release */ @@ -8076,6 +8110,7 @@ static int nfs4_state_create_net(struct net *net) spin_lock_init(&nn->client_lock); spin_lock_init(&nn->s2s_cp_lock); idr_init(&nn->s2s_cp_stateids); + atomic_set(&nn->pending_async_copies, 0); spin_lock_init(&nn->blocked_locks_lock); INIT_LIST_HEAD(&nn->blocked_locks_lru); @@ -8188,7 +8223,7 @@ nfs4_state_shutdown_net(struct net *net) struct nfsd_net *nn = net_generic(net, nfsd_net_id); unregister_shrinker(&nn->nfsd_client_shrinker); - cancel_work(&nn->nfsd_shrinker_work); + cancel_work_sync(&nn->nfsd_shrinker_work); cancel_delayed_work_sync(&nn->laundromat_work); locks_end_grace(&nn->nfsd4_manager); diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h index 513e028b0bbe..40aee06ebd95 100644 --- a/fs/nfsd/nfsfh.h +++ b/fs/nfsd/nfsfh.h @@ -263,7 +263,6 @@ static inline bool fh_fsid_match(const struct knfsd_fh *fh1, return true; } -#ifdef CONFIG_CRC32 /** * knfsd_fh_hash - calculate the crc32 hash 
for the filehandle * @fh - pointer to filehandle @@ -275,12 +274,6 @@ static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh) { return ~crc32_le(0xFFFFFFFF, fh->fh_raw, fh->fh_size); } -#else -static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh) -{ - return 0; -} -#endif /** * fh_clear_pre_post_attrs - Reset pre/post attributes diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 510978e602da..9bd1ade6ba54 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -574,6 +574,7 @@ struct nfsd4_copy { struct nfsd4_ssc_umount_item *ss_nsui; struct nfs_fh c_fh; nfs4_stateid stateid; + struct nfsd_net *cp_nn; }; static inline void nfsd4_copy_set_sync(struct nfsd4_copy *copy, bool sync) diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 19ed9015bd66..abe3d5e1d84c 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -35,6 +35,7 @@ void nilfs_init_btnc_inode(struct inode *btnc_inode) ii->i_flags = 0; memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap)); mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS); + btnc_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops; } void nilfs_btnode_cache_clear(struct address_space *btnc) @@ -68,7 +69,6 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) goto failed; } memset(bh->b_data, 0, i_blocksize(inode)); - bh->b_bdev = inode->i_sb->s_bdev; bh->b_blocknr = blocknr; set_buffer_mapped(bh); set_buffer_uptodate(bh); @@ -133,7 +133,6 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, goto found; } set_buffer_mapped(bh); - bh->b_bdev = inode->i_sb->s_bdev; bh->b_blocknr = pblocknr; /* set block address for read */ bh->b_end_io = end_buffer_read_sync; get_bh(bh); diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index b60f4fb9a405..0f3753af1674 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -64,19 +64,13 @@ static inline unsigned int nilfs_chunk_size(struct inode *inode) return inode->i_sb->s_blocksize; } -static inline void nilfs_put_page(struct page *page) -{ - kunmap(page); - put_page(page); -} - /* * Return the offset into page `page_nr' of the last valid * byte in that page, plus one. */ static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr) { - unsigned int last_byte = inode->i_size; + u64 last_byte = inode->i_size; last_byte -= page_nr << PAGE_SHIFT; if (last_byte > PAGE_SIZE) @@ -450,8 +444,7 @@ int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino) return 0; } -/* Releases the page */ -void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, +int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, struct page *page, struct inode *inode) { unsigned int from = (char *)de - (char *)page_address(page); @@ -461,12 +454,15 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, lock_page(page); err = nilfs_prepare_chunk(page, from, to); - BUG_ON(err); + if (unlikely(err)) { + unlock_page(page); + return err; + } de->inode = cpu_to_le64(inode->i_ino); nilfs_set_de_type(de, inode); nilfs_commit_chunk(page, mapping, from, to); - nilfs_put_page(page); dir->i_mtime = dir->i_ctime = current_time(dir); + return 0; } /* @@ -569,7 +565,7 @@ out_unlock: /* * nilfs_delete_entry deletes a directory entry by merging it with the - * previous entry. Page is up-to-date. Releases the page. + * previous entry. Page is up-to-date. 
*/ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page) { @@ -598,14 +594,16 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page) from = (char *)pde - (char *)page_address(page); lock_page(page); err = nilfs_prepare_chunk(page, from, to); - BUG_ON(err); + if (unlikely(err)) { + unlock_page(page); + goto out; + } if (pde) pde->rec_len = nilfs_rec_len_to_disk(to - from); dir->inode = 0; nilfs_commit_chunk(page, mapping, from, to); inode->i_ctime = inode->i_mtime = current_time(inode); out: - nilfs_put_page(page); return err; } diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index fcd13da5d012..8cdb1dc76f61 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -83,10 +83,8 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, goto out; } - if (!buffer_mapped(bh)) { - bh->b_bdev = inode->i_sb->s_bdev; + if (!buffer_mapped(bh)) set_buffer_mapped(bh); - } bh->b_blocknr = pbn; bh->b_end_io = end_buffer_read_sync; get_bh(bh); @@ -165,7 +163,7 @@ int nilfs_init_gcinode(struct inode *inode) inode->i_mode = S_IFREG; mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); - inode->i_mapping->a_ops = &empty_aops; + inode->i_mapping->a_ops = &nilfs_buffer_cache_aops; ii->i_flags = 0; nilfs_bmap_init_gc(ii->i_bmap); diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 8eb4288d46fe..452fb23d2e4c 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -162,7 +162,7 @@ static int nilfs_writepages(struct address_space *mapping, int err = 0; if (sb_rdonly(inode->i_sb)) { - nilfs_clear_dirty_pages(mapping, false); + nilfs_clear_dirty_pages(mapping); return -EROFS; } @@ -185,7 +185,7 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc) * have dirty pages that try to be flushed in background. * So, here we simply discard this dirty page. 
*/ - nilfs_clear_dirty_page(page, false); + nilfs_clear_dirty_page(page); unlock_page(page); return -EROFS; } @@ -309,6 +309,10 @@ const struct address_space_operations nilfs_aops = { .is_partially_uptodate = block_is_partially_uptodate, }; +const struct address_space_operations nilfs_buffer_cache_aops = { + .invalidate_folio = block_invalidate_folio, +}; + static int nilfs_insert_inode_locked(struct inode *inode, struct nilfs_root *root, unsigned long ino) @@ -614,8 +618,14 @@ struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root, inode = nilfs_iget_locked(sb, root, ino); if (unlikely(!inode)) return ERR_PTR(-ENOMEM); - if (!(inode->i_state & I_NEW)) + + if (!(inode->i_state & I_NEW)) { + if (!inode->i_nlink) { + iput(inode); + return ERR_PTR(-ESTALE); + } return inode; + } err = __nilfs_read_inode(sb, root, ino, inode); if (unlikely(err)) { @@ -748,6 +758,7 @@ struct inode *nilfs_iget_for_shadow(struct inode *inode) NILFS_I(s_inode)->i_flags = 0; memset(NILFS_I(s_inode)->i_bmap, 0, sizeof(struct nilfs_bmap)); mapping_set_gfp_mask(s_inode->i_mapping, GFP_NOFS); + s_inode->i_mapping->a_ops = &nilfs_buffer_cache_aops; err = nilfs_attach_btree_node_cache(s_inode); if (unlikely(err)) { @@ -1256,7 +1267,7 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, if (size) { if (phys && blkphy << blkbits == phys + size) { /* The current extent goes on */ - size += n << blkbits; + size += (u64)n << blkbits; } else { /* Terminate the current extent */ ret = fiemap_fill_next_extent( @@ -1269,14 +1280,14 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, flags = FIEMAP_EXTENT_MERGED; logical = blkoff << blkbits; phys = blkphy << blkbits; - size = n << blkbits; + size = (u64)n << blkbits; } } else { /* Start a new extent */ flags = FIEMAP_EXTENT_MERGED; logical = blkoff << blkbits; phys = blkphy << blkbits; - size = n << blkbits; + size = (u64)n << blkbits; } blkoff += n; } diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index cbf4fa60eea2..233954e22448 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -89,7 +89,6 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block, if (buffer_uptodate(bh)) goto failed_bh; - bh->b_bdev = sb->s_bdev; err = nilfs_mdt_insert_new_block(inode, block, bh, init_block); if (likely(!err)) { get_bh(bh); @@ -412,7 +411,7 @@ nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc) * have dirty pages that try to be flushed in background. * So, here we simply discard this dirty page. 
*/ - nilfs_clear_dirty_page(page, false); + nilfs_clear_dirty_page(page); unlock_page(page); return -EROFS; } @@ -635,10 +634,10 @@ void nilfs_mdt_restore_from_shadow_map(struct inode *inode) if (mi->mi_palloc_cache) nilfs_palloc_clear_cache(inode); - nilfs_clear_dirty_pages(inode->i_mapping, true); + nilfs_clear_dirty_pages(inode->i_mapping); nilfs_copy_back_pages(inode->i_mapping, shadow->inode->i_mapping); - nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping, true); + nilfs_clear_dirty_pages(ii->i_assoc_inode->i_mapping); nilfs_copy_back_pages(ii->i_assoc_inode->i_mapping, NILFS_I(shadow->inode)->i_assoc_inode->i_mapping); diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index ffaca278acd3..67d66207fae1 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -67,6 +67,11 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) inode = NULL; } else { inode = nilfs_iget(dir->i_sb, NILFS_I(dir)->i_root, ino); + if (inode == ERR_PTR(-ESTALE)) { + nilfs_error(dir->i_sb, + "deleted inode referenced: %lu", ino); + return ERR_PTR(-EIO); + } } return d_splice_alias(inode, dentry); @@ -292,6 +297,7 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry) set_nlink(inode, 1); } err = nilfs_delete_entry(de, page); + nilfs_put_page(page); if (err) goto out; @@ -400,7 +406,10 @@ static int nilfs_rename(struct user_namespace *mnt_userns, err = PTR_ERR(new_de); goto out_dir; } - nilfs_set_link(new_dir, new_de, new_page, old_inode); + err = nilfs_set_link(new_dir, new_de, new_page, old_inode); + nilfs_put_page(new_page); + if (unlikely(err)) + goto out_dir; nilfs_mark_inode_dirty(new_dir); new_inode->i_ctime = current_time(new_inode); if (dir_de) @@ -423,28 +432,27 @@ static int nilfs_rename(struct user_namespace *mnt_userns, */ old_inode->i_ctime = current_time(old_inode); - nilfs_delete_entry(old_de, old_page); - - if (dir_de) { - nilfs_set_link(old_inode, dir_de, dir_page, new_dir); - drop_nlink(old_dir); + err = nilfs_delete_entry(old_de, old_page); + if (likely(!err)) { + if (dir_de) { + err = nilfs_set_link(old_inode, dir_de, dir_page, + new_dir); + drop_nlink(old_dir); + } + nilfs_mark_inode_dirty(old_dir); } - nilfs_mark_inode_dirty(old_dir); nilfs_mark_inode_dirty(old_inode); - err = nilfs_transaction_commit(old_dir->i_sb); - return err; - out_dir: - if (dir_de) { - kunmap(dir_page); - put_page(dir_page); - } + if (dir_de) + nilfs_put_page(dir_page); out_old: - kunmap(old_page); - put_page(old_page); + nilfs_put_page(old_page); out: - nilfs_transaction_abort(old_dir->i_sb); + if (likely(!err)) + err = nilfs_transaction_commit(old_dir->i_sb); + else + nilfs_transaction_abort(old_dir->i_sb); return err; } diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index ee27bb370d77..dadafad2fae7 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -240,8 +240,14 @@ nilfs_find_entry(struct inode *, const struct qstr *, struct page **); extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *); extern int nilfs_empty_dir(struct inode *); extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **); -extern void nilfs_set_link(struct inode *, struct nilfs_dir_entry *, - struct page *, struct inode *); +int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, + struct page *page, struct inode *inode); + +static inline void nilfs_put_page(struct page *page) +{ + kunmap(page); + put_page(page); +} /* file.c */ extern int nilfs_sync_file(struct file *, loff_t, loff_t, int); @@ -379,6 +385,7 @@ extern const struct file_operations 
nilfs_dir_operations; extern const struct inode_operations nilfs_file_inode_operations; extern const struct file_operations nilfs_file_operations; extern const struct address_space_operations nilfs_aops; +extern const struct address_space_operations nilfs_buffer_cache_aops; extern const struct inode_operations nilfs_dir_inode_operations; extern const struct inode_operations nilfs_special_inode_operations; extern const struct inode_operations nilfs_symlink_inode_operations; diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 6bc4cda804e1..bf090240f6ed 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -39,7 +39,6 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, first_block = (unsigned long)index << (PAGE_SHIFT - blkbits); bh = nilfs_page_get_nth_block(page, block - first_block); - touch_buffer(bh); wait_on_buffer(bh); return bh; } @@ -64,6 +63,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode, put_page(page); return NULL; } + bh->b_bdev = inode->i_sb->s_bdev; return bh; } @@ -354,9 +354,8 @@ repeat: /** * nilfs_clear_dirty_pages - discard dirty pages in address space * @mapping: address space with dirty pages for discarding - * @silent: suppress [true] or print [false] warning messages */ -void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent) +void nilfs_clear_dirty_pages(struct address_space *mapping) { struct pagevec pvec; unsigned int i; @@ -377,7 +376,7 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent) * was acquired. Skip processing in that case. */ if (likely(page->mapping == mapping)) - nilfs_clear_dirty_page(page, silent); + nilfs_clear_dirty_page(page); unlock_page(page); } @@ -389,44 +388,54 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent) /** * nilfs_clear_dirty_page - discard dirty page * @page: dirty page that will be discarded - * @silent: suppress [true] or print [false] warning messages + * + * nilfs_clear_dirty_page() clears working states including dirty state for + * the page and its buffers. If the page has buffers, clear only if it is + * confirmed that none of the buffer heads are busy (none have valid + * references and none are locked). 
*/ -void nilfs_clear_dirty_page(struct page *page, bool silent) +void nilfs_clear_dirty_page(struct page *page) { - struct inode *inode = page->mapping->host; - struct super_block *sb = inode->i_sb; - BUG_ON(!PageLocked(page)); - if (!silent) - nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu", - page_offset(page), inode->i_ino); - - ClearPageUptodate(page); - ClearPageMappedToDisk(page); - ClearPageChecked(page); - if (page_has_buffers(page)) { - struct buffer_head *bh, *head; + struct buffer_head *bh, *head = page_buffers(page); const unsigned long clear_bits = (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) | BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) | BIT(BH_Delay)); + bool busy, invalidated = false; - bh = head = page_buffers(page); +recheck_buffers: + busy = false; + bh = head; + do { + if (atomic_read(&bh->b_count) | buffer_locked(bh)) { + busy = true; + break; + } + } while (bh = bh->b_this_page, bh != head); + + if (busy) { + if (invalidated) + return; + invalidate_bh_lrus(); + invalidated = true; + goto recheck_buffers; + } + + bh = head; do { lock_buffer(bh); - if (!silent) - nilfs_warn(sb, - "discard dirty block: blocknr=%llu, size=%zu", - (u64)bh->b_blocknr, bh->b_size); - set_mask_bits(&bh->b_state, clear_bits, 0); unlock_buffer(bh); } while (bh = bh->b_this_page, bh != head); } + ClearPageUptodate(page); + ClearPageMappedToDisk(page); + ClearPageChecked(page); __nilfs_clear_page_dirty(page); } diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index 21ddcdd4d63e..f5432f6a2fb6 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h @@ -41,8 +41,8 @@ void nilfs_page_bug(struct page *); int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); void nilfs_copy_back_pages(struct address_space *, struct address_space *); -void nilfs_clear_dirty_page(struct page *, bool); -void nilfs_clear_dirty_pages(struct address_space *, bool); +void nilfs_clear_dirty_page(struct page *page); +void nilfs_clear_dirty_pages(struct address_space *mapping); unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int, unsigned int); unsigned long nilfs_find_uncommitted_extent(struct inode *inode, diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 6bc8ad0d41f8..af4ab80443fe 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -732,7 +732,6 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, } if (!page_has_buffers(page)) create_empty_buffers(page, i_blocksize(inode), 0); - unlock_page(page); bh = head = page_buffers(page); do { @@ -742,11 +741,14 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, list_add_tail(&bh->b_assoc_buffers, listp); ndirties++; if (unlikely(ndirties >= nlimit)) { + unlock_page(page); pagevec_release(&pvec); cond_resched(); return ndirties; } } while (bh = bh->b_this_page, bh != head); + + unlock_page(page); } pagevec_release(&pvec); cond_resched(); diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index b5d8f238fce4..9cc4ebb53504 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -310,16 +310,19 @@ static int fsnotify_handle_event(struct fsnotify_group *group, __u32 mask, if (!inode_mark) return 0; - if (mask & FS_EVENT_ON_CHILD) { - /* - * Some events can be sent on both parent dir and child marks - * (e.g. FS_ATTRIB). If both parent dir and child are - * watching, report the event once to parent dir with name (if - * interested) and once to child without name (if interested). 
- * The child watcher is expecting an event without a file name - * and without the FS_EVENT_ON_CHILD flag. - */ - mask &= ~FS_EVENT_ON_CHILD; + /* + * Some events can be sent on both parent dir and child marks (e.g. + * FS_ATTRIB). If both parent dir and child are watching, report the + * event once to parent dir with name (if interested) and once to child + * without name (if interested). + * + * In any case regardless whether the parent is watching or not, the + * child watcher is expecting an event without the FS_EVENT_ON_CHILD + * flag. The file name is expected if and only if this is a directory + * event. + */ + mask &= ~FS_EVENT_ON_CHILD; + if (!(mask & ALL_FSNOTIFY_DIRENT_EVENTS)) { dir = NULL; name = NULL; } diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c index 0388e6b42100..feda45c7ca8e 100644 --- a/fs/ntfs3/attrib.c +++ b/fs/ntfs3/attrib.c @@ -176,7 +176,7 @@ out: int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run, CLST vcn, CLST lcn, CLST len, CLST *pre_alloc, enum ALLOCATE_OPT opt, CLST *alen, const size_t fr, - CLST *new_lcn) + CLST *new_lcn, CLST *new_len) { int err; CLST flen, vcn0 = vcn, pre = pre_alloc ? *pre_alloc : 0; @@ -196,20 +196,36 @@ int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run, if (err) goto out; - if (new_lcn && vcn == vcn0) - *new_lcn = lcn; + if (vcn == vcn0) { + /* Return the first fragment. */ + if (new_lcn) + *new_lcn = lcn; + if (new_len) + *new_len = flen; + } /* Add new fragment into run storage. */ - if (!run_add_entry(run, vcn, lcn, flen, opt == ALLOCATE_MFT)) { + if (!run_add_entry(run, vcn, lcn, flen, opt & ALLOCATE_MFT)) { /* Undo last 'ntfs_look_for_free_space' */ mark_as_free_ex(sbi, lcn, len, false); err = -ENOMEM; goto out; } + if (opt & ALLOCATE_ZERO) { + u8 shift = sbi->cluster_bits - SECTOR_SHIFT; + + err = blkdev_issue_zeroout(sbi->sb->s_bdev, + (sector_t)lcn << shift, + (sector_t)flen << shift, + GFP_NOFS, 0); + if (err) + goto out; + } + vcn += flen; - if (flen >= len || opt == ALLOCATE_MFT || + if (flen >= len || (opt & ALLOCATE_MFT) || (fr && run->count - cnt >= fr)) { *alen = vcn - vcn0; return 0; @@ -287,7 +303,8 @@ int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr, const char *data = resident_data(attr); err = attr_allocate_clusters(sbi, run, 0, 0, len, NULL, - ALLOCATE_DEF, &alen, 0, NULL); + ALLOCATE_DEF, &alen, 0, NULL, + NULL); if (err) goto out1; @@ -582,13 +599,13 @@ add_alloc_in_same_attr_seg: /* ~3 bytes per fragment. */ err = attr_allocate_clusters( sbi, run, vcn, lcn, to_allocate, &pre_alloc, - is_mft ? ALLOCATE_MFT : 0, &alen, + is_mft ? ALLOCATE_MFT : ALLOCATE_DEF, &alen, is_mft ? 0 : (sbi->record_size - le32_to_cpu(rec->used) + 8) / 3 + 1, - NULL); + NULL, NULL); if (err) goto out; } @@ -886,8 +903,19 @@ bad_inode: return err; } +/* + * attr_data_get_block - Returns 'lcn' and 'len' for given 'vcn'. + * + * @new == NULL means just to get current mapping for 'vcn' + * @new != NULL means allocate real cluster if 'vcn' maps to hole + * @zero - zeroout new allocated clusters + * + * NOTE: + * - @new != NULL is called only for sparsed or compressed attributes. + * - new allocated clusters are zeroed via blkdev_issue_zeroout. 
+ */ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, - CLST *len, bool *new) + CLST *len, bool *new, bool zero) { int err = 0; struct runs_tree *run = &ni->file.run; @@ -896,29 +924,27 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, struct ATTRIB *attr = NULL, *attr_b; struct ATTR_LIST_ENTRY *le, *le_b; struct mft_inode *mi, *mi_b; - CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end; + CLST hint, svcn, to_alloc, evcn1, next_svcn, asize, end, vcn0, alen; + unsigned int fr; u64 total_size; - u32 clst_per_frame; - bool ok; if (new) *new = false; + /* Try to find in cache. */ down_read(&ni->file.run_lock); - ok = run_lookup_entry(run, vcn, lcn, len, NULL); + if (!run_lookup_entry(run, vcn, lcn, len, NULL)) + *len = 0; up_read(&ni->file.run_lock); - if (ok && (*lcn != SPARSE_LCN || !new)) { - /* Normal way. */ - return 0; + if (*len) { + if (*lcn != SPARSE_LCN || !new) + return 0; /* Fast normal way without allocation. */ + else if (clen > *len) + clen = *len; } - if (!clen) - clen = 1; - - if (ok && clen > *len) - clen = *len; - + /* No cluster in cache or we need to allocate cluster in hole. */ sbi = ni->mi.sbi; cluster_bits = sbi->cluster_bits; @@ -944,12 +970,6 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, goto out; } - clst_per_frame = 1u << attr_b->nres.c_unit; - to_alloc = (clen + clst_per_frame - 1) & ~(clst_per_frame - 1); - - if (vcn + to_alloc > asize) - to_alloc = asize - vcn; - svcn = le64_to_cpu(attr_b->nres.svcn); evcn1 = le64_to_cpu(attr_b->nres.evcn) + 1; @@ -968,36 +988,68 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, evcn1 = le64_to_cpu(attr->nres.evcn) + 1; } + /* Load in cache actual information. */ err = attr_load_runs(attr, ni, run, NULL); if (err) goto out; - if (!ok) { - ok = run_lookup_entry(run, vcn, lcn, len, NULL); - if (ok && (*lcn != SPARSE_LCN || !new)) { - /* Normal way. */ - err = 0; - goto ok; - } + if (!*len) { + if (run_lookup_entry(run, vcn, lcn, len, NULL)) { + if (*lcn != SPARSE_LCN || !new) + goto ok; /* Slow normal way without allocation. */ - if (!ok && !new) { - *len = 0; - err = 0; + if (clen > *len) + clen = *len; + } else if (!new) { + /* Here we may return -ENOENT. + * In any case caller gets zero length. */ goto ok; } - - if (ok && clen > *len) { - clen = *len; - to_alloc = (clen + clst_per_frame - 1) & - ~(clst_per_frame - 1); - } } if (!is_attr_ext(attr_b)) { + /* The code below only for sparsed or compressed attributes. */ err = -EINVAL; goto out; } + vcn0 = vcn; + to_alloc = clen; + fr = (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1; + /* Allocate frame aligned clusters. + * ntfs.sys usually uses 16 clusters per frame for sparsed or compressed. + * ntfs3 uses 1 cluster per frame for new created sparsed files. */ + if (attr_b->nres.c_unit) { + CLST clst_per_frame = 1u << attr_b->nres.c_unit; + CLST cmask = ~(clst_per_frame - 1); + + /* Get frame aligned vcn and to_alloc. */ + vcn = vcn0 & cmask; + to_alloc = ((vcn0 + clen + clst_per_frame - 1) & cmask) - vcn; + if (fr < clst_per_frame) + fr = clst_per_frame; + zero = true; + + /* Check if 'vcn' and 'vcn0' in different attribute segments. */ + if (vcn < svcn || evcn1 <= vcn) { + /* Load attribute for truncated vcn. 
*/ + attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0, + &vcn, &mi); + if (!attr) { + err = -EINVAL; + goto out; + } + svcn = le64_to_cpu(attr->nres.svcn); + evcn1 = le64_to_cpu(attr->nres.evcn) + 1; + err = attr_load_runs(attr, ni, run, NULL); + if (err) + goto out; + } + } + + if (vcn + to_alloc > asize) + to_alloc = asize - vcn; + /* Get the last LCN to allocate from. */ hint = 0; @@ -1011,18 +1063,33 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, hint = -1; } - err = attr_allocate_clusters( - sbi, run, vcn, hint + 1, to_alloc, NULL, 0, len, - (sbi->record_size - le32_to_cpu(mi->mrec->used) + 8) / 3 + 1, - lcn); + /* Allocate and zeroout new clusters. */ + err = attr_allocate_clusters(sbi, run, vcn, hint + 1, to_alloc, NULL, + zero ? ALLOCATE_ZERO : ALLOCATE_DEF, &alen, + fr, lcn, len); if (err) goto out; *new = true; - end = vcn + *len; - + end = vcn + alen; total_size = le64_to_cpu(attr_b->nres.total_size) + - ((u64)*len << cluster_bits); + ((u64)alen << cluster_bits); + + if (vcn != vcn0) { + if (!run_lookup_entry(run, vcn0, lcn, len, NULL)) { + err = -EINVAL; + goto out; + } + if (*lcn == SPARSE_LCN) { + /* Internal error. Should not happen. */ + WARN_ON(1); + err = -EINVAL; + goto out; + } + /* Check the case when vcn0 + len overlaps newly allocated clusters. */ + if (vcn0 + *len > end) + *len = end - vcn0; + } repack: err = mi_pack_runs(mi, attr, run, max(end, evcn1) - svcn); @@ -1547,7 +1614,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size, struct ATTRIB *attr = NULL, *attr_b; struct ATTR_LIST_ENTRY *le, *le_b; struct mft_inode *mi, *mi_b; - CLST svcn, evcn1, next_svcn, lcn, len; + CLST svcn, evcn1, next_svcn, len; CLST vcn, end, clst_data; u64 total_size, valid_size, data_size; @@ -1623,8 +1690,9 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size, } err = attr_allocate_clusters(sbi, run, vcn + clst_data, - hint + 1, len - clst_data, NULL, 0, - &alen, 0, &lcn); + hint + 1, len - clst_data, NULL, + ALLOCATE_DEF, &alen, 0, NULL, + NULL); if (err) goto out; diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index aedd4f5f459e..46eec986ec9c 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -122,8 +122,8 @@ static int ntfs_extend_initialized_size(struct file *file, bits = sbi->cluster_bits; vcn = pos >> bits; - err = attr_data_get_block(ni, vcn, 0, &lcn, &clen, - NULL); + err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL, + false); if (err) goto out; @@ -196,18 +196,18 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to) struct address_space *mapping = inode->i_mapping; u32 blocksize = 1 << inode->i_blkbits; pgoff_t idx = vbo >> PAGE_SHIFT; - u32 z_start = vbo & (PAGE_SIZE - 1); + u32 from = vbo & (PAGE_SIZE - 1); pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT; loff_t page_off; struct buffer_head *head, *bh; - u32 bh_next, bh_off, z_end; + u32 bh_next, bh_off, to; sector_t iblock; struct page *page; - for (; idx < idx_end; idx += 1, z_start = 0) { + for (; idx < idx_end; idx += 1, from = 0) { page_off = (loff_t)idx << PAGE_SHIFT; - z_end = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) - : PAGE_SIZE; + to = (page_off + PAGE_SIZE) > vbo_to ?
(vbo_to - page_off) + : PAGE_SIZE; iblock = page_off >> inode->i_blkbits; page = find_or_create_page(mapping, idx, @@ -224,7 +224,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to) do { bh_next = bh_off + blocksize; - if (bh_next <= z_start || bh_off >= z_end) + if (bh_next <= from || bh_off >= to) continue; if (!buffer_mapped(bh)) { @@ -258,7 +258,7 @@ static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to) } while (bh_off = bh_next, iblock += 1, head != (bh = bh->b_this_page)); - zero_user_segment(page, z_start, z_end); + zero_user_segment(page, from, to); unlock_page(page); put_page(page); @@ -269,81 +269,6 @@ out: return err; } -/* - * ntfs_sparse_cluster - Helper function to zero a new allocated clusters. - * - * NOTE: 512 <= cluster size <= 2M - */ -void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn, - CLST len) -{ - struct address_space *mapping = inode->i_mapping; - struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info; - u64 vbo = (u64)vcn << sbi->cluster_bits; - u64 bytes = (u64)len << sbi->cluster_bits; - u32 blocksize = 1 << inode->i_blkbits; - pgoff_t idx0 = page0 ? page0->index : -1; - loff_t vbo_clst = vbo & sbi->cluster_mask_inv; - loff_t end = ntfs_up_cluster(sbi, vbo + bytes); - pgoff_t idx = vbo_clst >> PAGE_SHIFT; - u32 from = vbo_clst & (PAGE_SIZE - 1); - pgoff_t idx_end = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; - loff_t page_off; - u32 to; - bool partial; - struct page *page; - - for (; idx < idx_end; idx += 1, from = 0) { - page = idx == idx0 ? page0 : grab_cache_page(mapping, idx); - - if (!page) - continue; - - page_off = (loff_t)idx << PAGE_SHIFT; - to = (page_off + PAGE_SIZE) > end ? (end - page_off) - : PAGE_SIZE; - partial = false; - - if ((from || PAGE_SIZE != to) && - likely(!page_has_buffers(page))) { - create_empty_buffers(page, blocksize, 0); - } - - if (page_has_buffers(page)) { - struct buffer_head *head, *bh; - u32 bh_off = 0; - - bh = head = page_buffers(page); - do { - u32 bh_next = bh_off + blocksize; - - if (from <= bh_off && bh_next <= to) { - set_buffer_uptodate(bh); - mark_buffer_dirty(bh); - } else if (!buffer_uptodate(bh)) { - partial = true; - } - bh_off = bh_next; - } while (head != (bh = bh->b_this_page)); - } - - zero_user_segment(page, from, to); - - if (!partial) { - if (!PageUptodate(page)) - SetPageUptodate(page); - set_page_dirty(page); - } - - if (idx != idx0) { - unlock_page(page); - put_page(page); - } - cond_resched(); - } - mark_inode_dirty(inode); -} - /* * ntfs_file_mmap - file_operations::mmap */ @@ -385,13 +310,9 @@ static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma) for (; vcn < end; vcn += len) { err = attr_data_get_block(ni, vcn, 1, &lcn, - &len, &new); + &len, &new, true); if (err) goto out; - - if (!new) - continue; - ntfs_sparse_cluster(inode, NULL, vcn, 1); } } @@ -433,6 +354,7 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count, } if (extend_init && !is_compressed(ni)) { + WARN_ON(ni->i_valid >= pos); err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos); if (err) goto out; @@ -532,7 +454,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len) struct ntfs_sb_info *sbi = sb->s_fs_info; struct ntfs_inode *ni = ntfs_i(inode); loff_t end = vbo + len; - loff_t vbo_down = round_down(vbo, PAGE_SIZE); + loff_t vbo_down = round_down(vbo, max_t(unsigned long, + sbi->cluster_size, PAGE_SIZE)); bool is_supported_holes = is_sparsed(ni) || is_compressed(ni); loff_t i_size, new_size; bool map_locked; @@ 
-585,11 +508,8 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len) u32 frame_size; loff_t mask, vbo_a, end_a, tmp; - err = filemap_write_and_wait_range(mapping, vbo, end - 1); - if (err) - goto out; - - err = filemap_write_and_wait_range(mapping, end, LLONG_MAX); + err = filemap_write_and_wait_range(mapping, vbo_down, + LLONG_MAX); if (err) goto out; @@ -692,39 +612,35 @@ static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len) goto out; if (is_supported_holes) { - CLST vcn_v = ni->i_valid >> sbi->cluster_bits; CLST vcn = vbo >> sbi->cluster_bits; CLST cend = bytes_to_cluster(sbi, end); + CLST cend_v = bytes_to_cluster(sbi, ni->i_valid); CLST lcn, clen; bool new; + if (cend_v > cend) + cend_v = cend; + /* - * Allocate but do not zero new clusters. (see below comments) - * This breaks security: One can read unused on-disk areas. + * Allocate and zero new clusters. * Zeroing these clusters may be too long. - * Maybe we should check here for root rights? + */ + for (; vcn < cend_v; vcn += clen) { + err = attr_data_get_block(ni, vcn, cend_v - vcn, + &lcn, &clen, &new, + true); + if (err) + goto out; + } + /* + * Allocate but do not zero new clusters. */ for (; vcn < cend; vcn += clen) { err = attr_data_get_block(ni, vcn, cend - vcn, - &lcn, &clen, &new); + &lcn, &clen, &new, + false); if (err) goto out; - if (!new || vcn >= vcn_v) - continue; - - /* - * Unwritten area. - * NTFS is not able to store several unwritten areas. - * Activate 'ntfs_sparse_cluster' to zero new allocated clusters. - * - * Dangerous in case: - * 1G of sparsed clusters + 1 cluster of data => - * valid_size == 1G + 1 cluster - * fallocate(1G) will zero 1G and this can be very long - * xfstest 016/086 will fail without 'ntfs_sparse_cluster'. - */ - ntfs_sparse_cluster(inode, NULL, vcn, - min(vcn_v - vcn, clen)); } } @@ -945,8 +861,8 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) frame_vbo = valid & ~(frame_size - 1); off = valid & (frame_size - 1); - err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 0, &lcn, - &clen, NULL); + err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn, + &clen, NULL, false); if (err) goto out; @@ -1214,8 +1130,16 @@ static int ntfs_file_release(struct inode *inode, struct file *file) int err = 0; /* If we are last writer on the inode, drop the block reservation. */ - if (sbi->options->prealloc && ((file->f_mode & FMODE_WRITE) && - atomic_read(&inode->i_writecount) == 1)) { + if (sbi->options->prealloc && + ((file->f_mode & FMODE_WRITE) && + atomic_read(&inode->i_writecount) == 1) + /* + * The only file for which inode->i_fop is set to &ntfs_file_operations + * but init_rwsem(&ni->file.run_lock) is not called explicitly is the MFT. + * + * Add additional check here.
+ */ + && inode->i_ino != MFT_REC_MFT) { ni_lock(ni); down_write(&ni->file.run_lock); diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index d41ddc06f207..fb572688f919 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -2297,7 +2297,7 @@ int ni_decompress_file(struct ntfs_inode *ni) for (vcn = vbo >> sbi->cluster_bits; vcn < end; vcn += clen) { err = attr_data_get_block(ni, vcn, cend - vcn, &lcn, - &clen, &new); + &clen, &new, false); if (err) goto out; } diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index 2589f6d1215f..139bdaececd7 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -617,7 +617,7 @@ static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes) u32 off = le32_to_cpu(hdr->de_off); if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot || - off + sizeof(struct NTFS_DE) > end) { + size_add(off, sizeof(struct NTFS_DE)) > end) { /* incorrect index buffer. */ return false; } @@ -736,7 +736,7 @@ fill_table: if (end > total) return NULL; - if (off + sizeof(struct NTFS_DE) > end) + if (size_add(off, sizeof(struct NTFS_DE)) > end) return NULL; e = Add2Ptr(hdr, off); @@ -1442,8 +1442,8 @@ static int indx_create_allocate(struct ntfs_index *indx, struct ntfs_inode *ni, run_init(&run); - err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, 0, &alen, 0, - NULL); + err = attr_allocate_clusters(sbi, &run, 0, 0, len, NULL, ALLOCATE_DEF, + &alen, 0, NULL, NULL); if (err) goto out; diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index 057aa3cec902..5baf6a2b3d48 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -592,7 +592,8 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo, off = vbo & sbi->cluster_mask; new = false; - err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL); + err = attr_data_get_block(ni, vcn, 1, &lcn, &len, create ? &new : NULL, + create && sbi->cluster_size > PAGE_SIZE); if (err) goto out; @@ -610,11 +611,8 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo, WARN_ON(1); } - if (new) { + if (new) set_buffer_new(bh); - if ((len << cluster_bits) > block_size) - ntfs_sparse_cluster(inode, page, vcn, len); - } lbo = ((u64)lcn << cluster_bits) + off; @@ -1533,8 +1531,8 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns, cpu_to_le64(ntfs_up_cluster(sbi, nsize)); err = attr_allocate_clusters(sbi, &ni->file.run, 0, 0, - clst, NULL, 0, &alen, 0, - NULL); + clst, NULL, ALLOCATE_DEF, + &alen, 0, NULL, NULL); if (err) goto out5; diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h index 26dbe1b46fdd..f2f32e304b3d 100644 --- a/fs/ntfs3/ntfs_fs.h +++ b/fs/ntfs3/ntfs_fs.h @@ -42,9 +42,11 @@ enum utf16_endian; #define MINUS_ONE_T ((size_t)(-1)) /* Biggest MFT / smallest cluster */ #define MAXIMUM_BYTES_PER_MFT 4096 +#define MAXIMUM_SHIFT_BYTES_PER_MFT 12 #define NTFS_BLOCKS_PER_MFT_RECORD (MAXIMUM_BYTES_PER_MFT / 512) #define MAXIMUM_BYTES_PER_INDEX 4096 +#define MAXIMUM_SHIFT_BYTES_PER_INDEX 12 #define NTFS_BLOCKS_PER_INODE (MAXIMUM_BYTES_PER_INDEX / 512) /* NTFS specific error code when fixup failed. */ @@ -126,6 +128,7 @@ struct ntfs_buffers { enum ALLOCATE_OPT { ALLOCATE_DEF = 0, // Allocate all clusters. ALLOCATE_MFT = 1, // Allocate for MFT. 
+ ALLOCATE_ZERO = 2, // Zeroout new allocated clusters }; enum bitmap_mutex_classes { @@ -416,7 +419,7 @@ enum REPARSE_SIGN { int attr_allocate_clusters(struct ntfs_sb_info *sbi, struct runs_tree *run, CLST vcn, CLST lcn, CLST len, CLST *pre_alloc, enum ALLOCATE_OPT opt, CLST *alen, const size_t fr, - CLST *new_lcn); + CLST *new_lcn, CLST *new_len); int attr_make_nonresident(struct ntfs_inode *ni, struct ATTRIB *attr, struct ATTR_LIST_ENTRY *le, struct mft_inode *mi, u64 new_size, struct runs_tree *run, @@ -426,7 +429,7 @@ int attr_set_size(struct ntfs_inode *ni, enum ATTR_TYPE type, u64 new_size, const u64 *new_valid, bool keep_prealloc, struct ATTRIB **ret); int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn, - CLST *len, bool *new); + CLST *len, bool *new, bool zero); int attr_data_read_resident(struct ntfs_inode *ni, struct page *page); int attr_data_write_resident(struct ntfs_inode *ni, struct page *page); int attr_load_runs_vcn(struct ntfs_inode *ni, enum ATTR_TYPE type, @@ -491,8 +494,6 @@ extern const struct file_operations ntfs_dir_operations; /* Globals from file.c */ int ntfs_getattr(struct user_namespace *mnt_userns, const struct path *path, struct kstat *stat, u32 request_mask, u32 flags); -void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn, - CLST len); int ntfs3_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, struct iattr *attr); int ntfs_file_open(struct inode *inode, struct file *file); diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c index 7ab452710572..bfb1f4c2f271 100644 --- a/fs/ntfs3/record.c +++ b/fs/ntfs3/record.c @@ -217,29 +217,21 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) prev_type = 0; attr = Add2Ptr(rec, off); } else { - /* Check if input attr inside record. */ + /* + * We don't need to check previous attr here. There is + * a bounds checking in the previous round. + */ off = PtrOffset(rec, attr); - if (off >= used) - return NULL; asize = le32_to_cpu(attr->size); - if (asize < SIZEOF_RESIDENT) { - /* Impossible 'cause we should not return such attribute. */ - return NULL; - } - - /* Overflow check. */ - if (off + asize < off) - return NULL; prev_type = le32_to_cpu(attr->type); attr = Add2Ptr(attr, asize); off += asize; } - asize = le32_to_cpu(attr->size); - /* Can we use the first field (attr->type). */ + /* NOTE: this code also checks attr->size availability. */ if (off + 8 > used) { static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8); return NULL; @@ -259,6 +251,8 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) if (t32 < prev_type) return NULL; + asize = le32_to_cpu(attr->size); + /* Check overflow and boundary. */ if (off + asize < off || off + asize > used) return NULL; @@ -273,7 +267,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) if (t16 > asize) return NULL; - if (t16 + le32_to_cpu(attr->res.data_size) > asize) + if (le32_to_cpu(attr->res.data_size) > asize - t16) return NULL; if (attr->name_len && @@ -288,6 +282,10 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) if (attr->non_res != 1) return NULL; + /* Can we use memory including attr->nres.valid_size? 
*/ + if (asize < SIZEOF_NONRESIDENT) + return NULL; + t16 = le16_to_cpu(attr->nres.run_off); if (t16 > asize) return NULL; @@ -314,7 +312,8 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) if (!attr->nres.svcn && is_attr_ext(attr)) { /* First segment of sparse/compressed attribute */ - if (asize + 8 < SIZEOF_NONRESIDENT_EX) + /* Can we use memory including attr->nres.total_size? */ + if (asize < SIZEOF_NONRESIDENT_EX) return NULL; tot_size = le64_to_cpu(attr->nres.total_size); @@ -324,10 +323,10 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr) if (tot_size > alloc_size) return NULL; } else { - if (asize + 8 < SIZEOF_NONRESIDENT) + if (attr->nres.c_unit) return NULL; - if (attr->nres.c_unit) + if (alloc_size > mi->sbi->volume.size) return NULL; } diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c index eee54214f4a3..674a16c0c66b 100644 --- a/fs/ntfs3/super.c +++ b/fs/ntfs3/super.c @@ -680,7 +680,7 @@ static u32 true_sectors_per_clst(const struct NTFS_BOOT *boot) * ntfs_init_from_boot - Init internal info from on-disk boot sector. */ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, - u64 dev_size) + u64 dev_size) { struct ntfs_sb_info *sbi = sb->s_fs_info; int err; @@ -705,12 +705,12 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, /* 0x55AA is not mandaroty. Thanks Maxim Suhanov*/ /*if (0x55 != boot->boot_magic[0] || 0xAA != boot->boot_magic[1]) - * goto out; + * goto out; */ boot_sector_size = (u32)boot->bytes_per_sector[1] << 8; if (boot->bytes_per_sector[0] || boot_sector_size < SECTOR_SIZE || - !is_power_of_2(boot_sector_size)) { + !is_power_of_2(boot_sector_size)) { goto out; } @@ -733,15 +733,49 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, /* Check MFT record size. */ if ((boot->record_size < 0 && - SECTOR_SIZE > (2U << (-boot->record_size))) || - (boot->record_size >= 0 && !is_power_of_2(boot->record_size))) { + SECTOR_SIZE > (2U << (-boot->record_size))) || + (boot->record_size >= 0 && !is_power_of_2(boot->record_size))) { + goto out; + } + + /* Calculate cluster size */ + sbi->cluster_size = boot_sector_size * sct_per_clst; + sbi->cluster_bits = blksize_bits(sbi->cluster_size); + + if (boot->record_size >= 0) { + record_size = (u32)boot->record_size << sbi->cluster_bits; + } else if (-boot->record_size <= MAXIMUM_SHIFT_BYTES_PER_MFT) { + record_size = 1u << (-boot->record_size); + } else { + ntfs_err(sb, "%s: invalid record size %d.", "NTFS", + boot->record_size); + goto out; + } + + sbi->record_size = record_size; + sbi->record_bits = blksize_bits(record_size); + sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes + + if (record_size > MAXIMUM_BYTES_PER_MFT) { + ntfs_err(sb, "Unsupported bytes per MFT record %u.", + record_size); + goto out; + } + + if (boot->index_size >= 0) { + sbi->index_size = (u32)boot->index_size << sbi->cluster_bits; + } else if (-boot->index_size <= MAXIMUM_SHIFT_BYTES_PER_INDEX) { + sbi->index_size = 1u << (-boot->index_size); + } else { + ntfs_err(sb, "%s: invalid index size %d.", "NTFS", + boot->index_size); goto out; } /* Check index record size. 
*/ if ((boot->index_size < 0 && - SECTOR_SIZE > (2U << (-boot->index_size))) || - (boot->index_size >= 0 && !is_power_of_2(boot->index_size))) { + SECTOR_SIZE > (2U << (-boot->index_size))) || + (boot->index_size >= 0 && !is_power_of_2(boot->index_size))) { goto out; } @@ -762,9 +796,6 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, dev_size += sector_size - 1; } - sbi->cluster_size = boot_sector_size * sct_per_clst; - sbi->cluster_bits = blksize_bits(sbi->cluster_size); - sbi->mft.lbo = mlcn << sbi->cluster_bits; sbi->mft.lbo2 = mlcn2 << sbi->cluster_bits; @@ -785,9 +816,9 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, sbi->cluster_mask = sbi->cluster_size - 1; sbi->cluster_mask_inv = ~(u64)sbi->cluster_mask; sbi->record_size = record_size = boot->record_size < 0 - ? 1 << (-boot->record_size) - : (u32)boot->record_size - << sbi->cluster_bits; + ? 1 << (-boot->record_size) + : (u32)boot->record_size + << sbi->cluster_bits; if (record_size > MAXIMUM_BYTES_PER_MFT || record_size < SECTOR_SIZE) goto out; @@ -801,8 +832,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, ALIGN(sizeof(enum ATTR_TYPE), 8); sbi->index_size = boot->index_size < 0 - ? 1u << (-boot->index_size) - : (u32)boot->index_size << sbi->cluster_bits; + ? 1u << (-boot->index_size) + : (u32)boot->index_size << sbi->cluster_bits; sbi->volume.ser_num = le64_to_cpu(boot->serial_num); @@ -871,13 +902,6 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size, sb->s_maxbytes = 0xFFFFFFFFull << sbi->cluster_bits; #endif - /* - * Compute the MFT zone at two steps. - * It would be nice if we are able to allocate 1/8 of - * total clusters for MFT but not more then 512 MB. - */ - sbi->zone_max = min_t(CLST, 0x20000000 >> sbi->cluster_bits, clusters >> 3); - err = 0; out: diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 51c93929a146..7f11ffacc915 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -1796,6 +1796,14 @@ static int __ocfs2_find_path(struct ocfs2_caching_info *ci, el = root_el; while (el->l_tree_depth) { + if (unlikely(le16_to_cpu(el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH)) { + ocfs2_error(ocfs2_metadata_cache_get_super(ci), + "Owner %llu has invalid tree depth %u in extent list\n", + (unsigned long long)ocfs2_metadata_cache_owner(ci), + le16_to_cpu(el->l_tree_depth)); + ret = -EROFS; + goto out; + } if (le16_to_cpu(el->l_next_free_rec) == 0) { ocfs2_error(ocfs2_metadata_cache_get_super(ci), "Owner %llu has empty extent list at depth %u\n", diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 3a520117fa59..a9ce7947228c 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -70,6 +70,8 @@ enum ocfs2_iocb_lock_bits { OCFS2_IOCB_NUM_LOCKS }; +#define ocfs2_iocb_init_rw_locked(iocb) \ + (iocb->private = NULL) #define ocfs2_iocb_clear_rw_locked(iocb) \ clear_bit(OCFS2_IOCB_RW_LOCK, (unsigned long *)&iocb->private) #define ocfs2_iocb_rw_locked_level(iocb) \ diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index d27e15b54be4..de6fd4a09ffd 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -1065,26 +1065,39 @@ int ocfs2_find_entry(const char *name, int namelen, { struct buffer_head *bh; struct ocfs2_dir_entry *res_dir = NULL; + int ret = 0; if (ocfs2_dir_indexed(dir)) return ocfs2_find_entry_dx(name, namelen, dir, lookup); + if (unlikely(i_size_read(dir) <= 0)) { + ret = -EFSCORRUPTED; + mlog_errno(ret); + goto out; + } /* * The unindexed dir code only uses part of the lookup * structure, so there's no reason to push it down further 
* than this. */ - if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) + if (OCFS2_I(dir)->ip_dyn_features & OCFS2_INLINE_DATA_FL) { + if (unlikely(i_size_read(dir) > dir->i_sb->s_blocksize)) { + ret = -EFSCORRUPTED; + mlog_errno(ret); + goto out; + } bh = ocfs2_find_entry_id(name, namelen, dir, &res_dir); - else + } else { bh = ocfs2_find_entry_el(name, namelen, dir, &res_dir); + } if (bh == NULL) return -ENOENT; lookup->dl_leaf_bh = bh; lookup->dl_entry = res_dir; - return 0; +out: + return ret; } /* @@ -2011,6 +2024,7 @@ int ocfs2_lookup_ino_from_name(struct inode *dir, const char *name, * * Return 0 if the name does not exist * Return -EEXIST if the directory contains the name + * Return -EFSCORRUPTED if found corruption * * Callers should have i_rwsem + a cluster lock on dir */ @@ -2024,9 +2038,12 @@ int ocfs2_check_dir_for_entry(struct inode *dir, trace_ocfs2_check_dir_for_entry( (unsigned long long)OCFS2_I(dir)->ip_blkno, namelen, name); - if (ocfs2_find_entry(name, namelen, dir, &lookup) == 0) { + ret = ocfs2_find_entry(name, namelen, dir, &lookup); + if (ret == 0) { ret = -EEXIST; mlog_errno(ret); + } else if (ret == -ENOENT) { + ret = 0; } ocfs2_free_dir_lookup_result(&lookup); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index c28bc983a7b1..d96cacf31f53 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -3108,6 +3108,7 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos) struct ocfs2_lock_res *iter = v; struct ocfs2_lock_res *dummy = &priv->p_iter_res; + (*pos)++; spin_lock(&ocfs2_dlm_tracking_lock); iter = ocfs2_dlm_next_res(iter, priv); list_del_init(&dummy->l_debug_list); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index e96b947c3f5d..e29e267472bf 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2398,6 +2398,8 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb, } else inode_lock(inode); + ocfs2_iocb_init_rw_locked(iocb); + /* * Concurrent O_DIRECT writes are allowed with * mount_option "coherency=buffered". @@ -2544,6 +2546,8 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb, if (!direct_io && nowait) return -EOPNOTSUPP; + ocfs2_iocb_init_rw_locked(iocb); + /* * buffered reads protect themselves in ->read_folio(). O_DIRECT reads * need locks to protect pending reads from racing with truncate. diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 775766856a54..7f5fd16d3740 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -173,7 +173,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb) struct ocfs2_recovery_map *rm; mutex_init(&osb->recovery_lock); - osb->disable_recovery = 0; + osb->recovery_state = OCFS2_REC_ENABLED; osb->recovery_thread_task = NULL; init_waitqueue_head(&osb->recovery_event); @@ -192,31 +192,53 @@ int ocfs2_recovery_init(struct ocfs2_super *osb) return 0; } -/* we can't grab the goofy sem lock from inside wait_event, so we use - * memory barriers to make sure that we'll see the null task before - * being woken up */ static int ocfs2_recovery_thread_running(struct ocfs2_super *osb) { - mb(); return osb->recovery_thread_task != NULL; } +static void ocfs2_recovery_disable(struct ocfs2_super *osb, + enum ocfs2_recovery_state state) +{ + mutex_lock(&osb->recovery_lock); + /* + * If recovery thread is not running, we can directly transition to + * final state. 
+ */ + if (!ocfs2_recovery_thread_running(osb)) { + osb->recovery_state = state + 1; + goto out_lock; + } + osb->recovery_state = state; + /* Wait for recovery thread to acknowledge state transition */ + wait_event_cmd(osb->recovery_event, + !ocfs2_recovery_thread_running(osb) || + osb->recovery_state >= state + 1, + mutex_unlock(&osb->recovery_lock), + mutex_lock(&osb->recovery_lock)); +out_lock: + mutex_unlock(&osb->recovery_lock); + + /* + * At this point we know that no more recovery work can be queued so + * wait for any recovery completion work to complete. + */ + if (osb->ocfs2_wq) + flush_workqueue(osb->ocfs2_wq); +} + +void ocfs2_recovery_disable_quota(struct ocfs2_super *osb) +{ + ocfs2_recovery_disable(osb, OCFS2_REC_QUOTA_WANT_DISABLE); +} + void ocfs2_recovery_exit(struct ocfs2_super *osb) { struct ocfs2_recovery_map *rm; /* disable any new recovery threads and wait for any currently * running ones to exit. Do this before setting the vol_state. */ - mutex_lock(&osb->recovery_lock); - osb->disable_recovery = 1; - mutex_unlock(&osb->recovery_lock); - wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb)); - - /* At this point, we know that no more recovery threads can be - * launched, so wait for any recovery completion work to - * complete. */ - if (osb->ocfs2_wq) - flush_workqueue(osb->ocfs2_wq); + ocfs2_recovery_disable(osb, OCFS2_REC_WANT_DISABLE); /* * Now that recovery is shut down, and the osb is about to be @@ -1439,6 +1461,18 @@ static int __ocfs2_recovery_thread(void *arg) } } restart: + if (quota_enabled) { + mutex_lock(&osb->recovery_lock); + /* Confirm that recovery thread will no longer recover quotas */ + if (osb->recovery_state == OCFS2_REC_QUOTA_WANT_DISABLE) { + osb->recovery_state = OCFS2_REC_QUOTA_DISABLED; + wake_up(&osb->recovery_event); + } + if (osb->recovery_state >= OCFS2_REC_QUOTA_DISABLED) + quota_enabled = 0; + mutex_unlock(&osb->recovery_lock); + } + status = ocfs2_super_lock(osb, 1); if (status < 0) { mlog_errno(status); @@ -1536,27 +1570,29 @@ bail: ocfs2_free_replay_slots(osb); osb->recovery_thread_task = NULL; - mb(); /* sync with ocfs2_recovery_thread_running */ + if (osb->recovery_state == OCFS2_REC_WANT_DISABLE) + osb->recovery_state = OCFS2_REC_DISABLED; wake_up(&osb->recovery_event); mutex_unlock(&osb->recovery_lock); - if (quota_enabled) - kfree(rm_quota); + kfree(rm_quota); return status; } void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num) { + int was_set = -1; + mutex_lock(&osb->recovery_lock); + if (osb->recovery_state < OCFS2_REC_WANT_DISABLE) + was_set = ocfs2_recovery_map_set(osb, node_num); trace_ocfs2_recovery_thread(node_num, osb->node_num, - osb->disable_recovery, osb->recovery_thread_task, - osb->disable_recovery ? 
- -1 : ocfs2_recovery_map_set(osb, node_num)); + osb->recovery_state, osb->recovery_thread_task, was_set); - if (osb->disable_recovery) + if (osb->recovery_state >= OCFS2_REC_WANT_DISABLE) goto out; if (osb->recovery_thread_task) diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 689c340c6363..5ebde794a653 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h @@ -148,6 +148,7 @@ void ocfs2_wait_for_recovery(struct ocfs2_super *osb); int ocfs2_recovery_init(struct ocfs2_super *osb); void ocfs2_recovery_exit(struct ocfs2_super *osb); +void ocfs2_recovery_disable_quota(struct ocfs2_super *osb); int ocfs2_compute_replay_slots(struct ocfs2_super *osb); void ocfs2_free_replay_slots(struct ocfs2_super *osb); diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c index d54441b4c6ca..c4426d12a2ad 100644 --- a/fs/ocfs2/localalloc.c +++ b/fs/ocfs2/localalloc.c @@ -1008,25 +1008,6 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb, start = bit_off + 1; } - /* clear the contiguous bits until the end boundary */ - if (count) { - blkno = la_start_blk + - ocfs2_clusters_to_blocks(osb->sb, - start - count); - - trace_ocfs2_sync_local_to_main_free( - count, start - count, - (unsigned long long)la_start_blk, - (unsigned long long)blkno); - - status = ocfs2_release_clusters(handle, - main_bm_inode, - main_bm_bh, blkno, - count); - if (status < 0) - mlog_errno(status); - } - bail: if (status) mlog_errno(status); diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 8a0fa51c9ac6..63b06377f630 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -200,8 +200,10 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode) mode = mode_strip_sgid(&init_user_ns, dir, mode); inode_init_owner(&init_user_ns, inode, dir, mode); status = dquot_initialize(inode); - if (status) + if (status) { + iput(inode); return ERR_PTR(status); + } return inode; } diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 740b64238312..22882c636bfb 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -284,6 +284,21 @@ enum ocfs2_mount_options #define OCFS2_OSB_ERROR_FS 0x0004 #define OCFS2_DEFAULT_ATIME_QUANTUM 60 +enum ocfs2_recovery_state { + OCFS2_REC_ENABLED = 0, + OCFS2_REC_QUOTA_WANT_DISABLE, + /* + * Must be OCFS2_REC_QUOTA_WANT_DISABLE + 1 for + * ocfs2_recovery_disable_quota() to work. + */ + OCFS2_REC_QUOTA_DISABLED, + OCFS2_REC_WANT_DISABLE, + /* + * Must be OCFS2_REC_WANT_DISABLE + 1 for ocfs2_recovery_exit() to work + */ + OCFS2_REC_DISABLED, +}; + struct ocfs2_journal; struct ocfs2_slot_info; struct ocfs2_recovery_map; @@ -346,7 +361,7 @@ struct ocfs2_super struct ocfs2_recovery_map *recovery_map; struct ocfs2_replay_map *replay_map; struct task_struct *recovery_thread_task; - int disable_recovery; + enum ocfs2_recovery_state recovery_state; wait_queue_head_t checkpoint_event; struct ocfs2_journal *journal; unsigned long osb_commit_interval; diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index dc9f76ab7e13..24b031dc44ee 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -749,6 +749,11 @@ static int ocfs2_release_dquot(struct dquot *dquot) handle = ocfs2_start_trans(osb, ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type)); if (IS_ERR(handle)) { + /* + * Mark dquot as inactive to avoid endless cycle in + * quota_release_workfn(). 
+ */ + clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); status = PTR_ERR(handle); mlog_errno(status); goto out_ilock; @@ -881,7 +886,7 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid) int status = 0; trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type); - if (!sb_has_quota_loaded(sb, type)) { + if (!sb_has_quota_active(sb, type)) { status = -ESRCH; goto out; } diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 404ca3a62508..0ca8975a1df4 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -453,8 +453,7 @@ out: /* Sync changes in local quota file into global quota file and * reinitialize local quota file. - * The function expects local quota file to be already locked and - * s_umount locked in shared mode. */ + * The function expects local quota file to be already locked. */ static int ocfs2_recover_local_quota_file(struct inode *lqinode, int type, struct ocfs2_quota_recovery *rec) @@ -585,7 +584,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, { unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE, LOCAL_GROUP_QUOTA_SYSTEM_INODE }; - struct super_block *sb = osb->sb; struct ocfs2_local_disk_dqinfo *ldinfo; struct buffer_head *bh; handle_t *handle; @@ -597,7 +595,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb, printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for " "slot %u\n", osb->dev_str, slot_num); - down_read(&sb->s_umount); for (type = 0; type < OCFS2_MAXQUOTAS; type++) { if (list_empty(&(rec->r_list[type]))) continue; @@ -674,7 +671,6 @@ out_put: break; } out: - up_read(&sb->s_umount); kfree(rec); return status; } @@ -815,7 +811,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type) struct ocfs2_quota_chunk *chunk; struct ocfs2_local_disk_chunk *dchunk; int mark_clean = 1, len; - int status; + int status = 0; iput(oinfo->dqi_gqinode); ocfs2_simple_drop_lockres(OCFS2_SB(sb), &oinfo->dqi_gqlock); @@ -840,8 +836,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type) ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk); /* - * s_umount held in exclusive mode protects us against racing with - * recovery thread... + * ocfs2_dismount_volume() has already aborted quota recovery... 
*/ if (oinfo->dqi_rec) { ocfs2_free_quota_recovery(oinfo->dqi_rec); @@ -857,17 +852,15 @@ static int ocfs2_local_free_info(struct super_block *sb, int type) oinfo->dqi_libh, olq_update_info, info); - if (status < 0) { + if (status < 0) mlog_errno(status); - goto out; - } - out: ocfs2_inode_unlock(sb_dqopt(sb)->files[type], 1); brelse(oinfo->dqi_libh); brelse(oinfo->dqi_lqi_bh); kfree(oinfo); - return 0; + info->dqi_priv = NULL; + return status; } static void olq_set_dquot(struct buffer_head *bh, void *private) diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c index d65d43c61857..b2b47bb79529 100644 --- a/fs/ocfs2/resize.c +++ b/fs/ocfs2/resize.c @@ -566,6 +566,8 @@ out_commit: ocfs2_commit_trans(osb, handle); out_free_group_bh: + if (ret < 0) + ocfs2_remove_from_cache(INODE_CACHE(inode), group_bh); brelse(group_bh); out_unlock: diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 988d1c076861..b30f9d71a35d 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1869,6 +1869,9 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) /* Orphan scan should be stopped as early as possible */ ocfs2_orphan_scan_stop(osb); + /* Stop quota recovery so that we can disable quotas */ + ocfs2_recovery_disable_quota(osb); + ocfs2_disable_quotas(osb); /* All dquots should be freed by now */ @@ -2321,6 +2324,7 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di, struct ocfs2_blockcheck_stats *stats) { int status = -EAGAIN; + u32 blksz_bits; if (memcmp(di->i_signature, OCFS2_SUPER_BLOCK_SIGNATURE, strlen(OCFS2_SUPER_BLOCK_SIGNATURE)) == 0) { @@ -2335,11 +2339,15 @@ static int ocfs2_verify_volume(struct ocfs2_dinode *di, goto out; } status = -EINVAL; - if ((1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits)) != blksz) { + /* Acceptable block sizes are 512 bytes, 1K, 2K and 4K. 
*/ + blksz_bits = le32_to_cpu(di->id2.i_super.s_blocksize_bits); + if (blksz_bits < 9 || blksz_bits > 12) { mlog(ML_ERROR, "found superblock with incorrect block " - "size: found %u, should be %u\n", - 1 << le32_to_cpu(di->id2.i_super.s_blocksize_bits), - blksz); + "size bits: found %u, should be 9, 10, 11, or 12\n", + blksz_bits); + } else if ((1 << blksz_bits) != blksz) { + mlog(ML_ERROR, "found superblock with incorrect block " + "size: found %u, should be %u\n", 1 << blksz_bits, blksz); } else if (le16_to_cpu(di->id2.i_super.s_major_rev_level) != OCFS2_MAJOR_REV_LEVEL || le16_to_cpu(di->id2.i_super.s_minor_rev_level) != diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index d4c5fdcfa1e4..f5cf2255dc09 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -65,7 +65,7 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio) if (status < 0) { mlog_errno(status); - return status; + goto out; } fe = (struct ocfs2_dinode *) bh->b_data; @@ -76,9 +76,10 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio) memcpy(kaddr, link, len + 1); kunmap_atomic(kaddr); SetPageUptodate(page); +out: unlock_page(page); brelse(bh); - return 0; + return status; } const struct address_space_operations ocfs2_fast_symlink_aops = { diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index b3bbb5a5787a..cc81ff6ac735 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -23,9 +23,9 @@ static int orangefs_writepage_locked(struct page *page, struct orangefs_write_range *wr = NULL; struct iov_iter iter; struct bio_vec bv; - size_t len, wlen; + size_t wlen; ssize_t ret; - loff_t off; + loff_t len, off; set_page_writeback(page); @@ -94,8 +94,7 @@ static int orangefs_writepages_work(struct orangefs_writepages *ow, struct orangefs_write_range *wrp, wr; struct iov_iter iter; ssize_t ret; - size_t len; - loff_t off; + loff_t len, off; int i; len = i_size_read(inode); diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c index 1b508f543384..fa41db088488 100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c @@ -393,9 +393,9 @@ static ssize_t orangefs_debug_write(struct file *file, * Thwart users who try to jamb a ridiculous number * of bytes into the debug file... 
*/ - if (count > ORANGEFS_MAX_DEBUG_STRING_LEN + 1) { + if (count > ORANGEFS_MAX_DEBUG_STRING_LEN) { silly = count; - count = ORANGEFS_MAX_DEBUG_STRING_LEN + 1; + count = ORANGEFS_MAX_DEBUG_STRING_LEN; } buf = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL); diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 86d4b6975dbc..203b88293f6b 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -532,7 +532,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c) err = PTR_ERR(upper); if (!IS_ERR(upper)) { err = ovl_do_link(ofs, ovl_dentry_upper(c->dentry), udir, upper); - dput(upper); if (!err) { /* Restore timestamps on parent (best effort) */ @@ -540,6 +539,7 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c) ovl_dentry_set_upper_alias(c->dentry); ovl_dentry_update_reval(c->dentry, upper); } + dput(upper); } inode_unlock(udir); if (err) diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index c062f7e2ecb5..330c31bc99ec 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -593,8 +593,13 @@ static int ovl_security_fileattr(const struct path *realpath, struct fileattr *f struct file *file; unsigned int cmd; int err; + unsigned int flags; - file = dentry_open(realpath, O_RDONLY, current_cred()); + flags = O_RDONLY; + if (force_o_largefile()) + flags |= O_LARGEFILE; + + file = dentry_open(realpath, flags, current_cred()); if (IS_ERR(file)) return PTR_ERR(file); diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index 0d8f96168e6c..83cd20f79c5c 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -135,6 +135,9 @@ void ovl_dentry_init_flags(struct dentry *dentry, struct dentry *upperdentry, bool ovl_dentry_weird(struct dentry *dentry) { + if (!d_can_lookup(dentry) && !d_is_file(dentry) && !d_is_symlink(dentry)) + return true; + return dentry->d_flags & (DCACHE_NEED_AUTOMOUNT | DCACHE_MANAGE_TRANSIT | DCACHE_OP_HASH | diff --git a/fs/proc/array.c b/fs/proc/array.c index d210b2f8b7ed..86fde69ec11a 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -490,7 +490,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, * a program is not able to use ptrace(2) in that case. It is * safe because the task has stopped executing permanently. */ - if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE))) { + if (permitted && (task->flags & (PF_EXITING|PF_DUMPCORE|PF_POSTCOREDUMP))) { if (try_get_task_stack(task)) { eip = KSTK_EIP(task); esp = KSTK_ESP(task); diff --git a/fs/proc/base.c b/fs/proc/base.c index ecc45389ea79..fb169bdfd9a8 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -415,7 +415,7 @@ static const struct file_operations proc_pid_cmdline_ops = { #ifdef CONFIG_KALLSYMS /* * Provides a wchan file via kallsyms in a proper one-value-per-file format. - * Returns the resolved symbol. If that fails, simply return the address. + * Returns the resolved symbol to user space. 
*/ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) @@ -2633,10 +2633,11 @@ static ssize_t timerslack_ns_write(struct file *file, const char __user *buf, } task_lock(p); - if (slack_ns == 0) - p->timer_slack_ns = p->default_timer_slack_ns; - else - p->timer_slack_ns = slack_ns; + if (task_is_realtime(p)) + slack_ns = 0; + else if (slack_ns == 0) + slack_ns = p->default_timer_slack_ns; + p->timer_slack_ns = slack_ns; task_unlock(p); out: diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 587b91d9d998..b721bb88b4a6 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -558,10 +558,16 @@ struct proc_dir_entry *proc_create_reg(const char *name, umode_t mode, return p; } -static inline void pde_set_flags(struct proc_dir_entry *pde) +static void pde_set_flags(struct proc_dir_entry *pde) { if (pde->proc_ops->proc_flags & PROC_ENTRY_PERMANENT) pde->flags |= PROC_ENTRY_PERMANENT; + if (pde->proc_ops->proc_read_iter) + pde->flags |= PROC_ENTRY_proc_read_iter; +#ifdef CONFIG_COMPAT + if (pde->proc_ops->proc_compat_ioctl) + pde->flags |= PROC_ENTRY_proc_compat_ioctl; +#endif } struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, @@ -625,6 +631,7 @@ struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode, p->proc_ops = &proc_seq_ops; p->seq_ops = ops; p->state_size = state_size; + pde_set_flags(p); return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_seq_private); @@ -655,6 +662,7 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode, return NULL; p->proc_ops = &proc_single_ops; p->single_show = show; + pde_set_flags(p); return proc_register(parent, p); } EXPORT_SYMBOL(proc_create_single_data); diff --git a/fs/proc/inode.c b/fs/proc/inode.c index f495fdb39151..025490480be1 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -679,13 +679,13 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) if (S_ISREG(inode->i_mode)) { inode->i_op = de->proc_iops; - if (de->proc_ops->proc_read_iter) + if (pde_has_proc_read_iter(de)) inode->i_fop = &proc_iter_file_ops; else inode->i_fop = &proc_reg_file_ops; #ifdef CONFIG_COMPAT - if (de->proc_ops->proc_compat_ioctl) { - if (de->proc_ops->proc_read_iter) + if (pde_has_proc_compat_ioctl(de)) { + if (pde_has_proc_read_iter(de)) inode->i_fop = &proc_iter_file_ops_compat; else inode->i_fop = &proc_reg_file_ops_compat; diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 6b921826d85b..d115d22c01d4 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -84,6 +84,20 @@ static inline void pde_make_permanent(struct proc_dir_entry *pde) pde->flags |= PROC_ENTRY_PERMANENT; } +static inline bool pde_has_proc_read_iter(const struct proc_dir_entry *pde) +{ + return pde->flags & PROC_ENTRY_proc_read_iter; +} + +static inline bool pde_has_proc_compat_ioctl(const struct proc_dir_entry *pde) +{ +#ifdef CONFIG_COMPAT + return pde->flags & PROC_ENTRY_proc_compat_ioctl; +#else + return false; +#endif +} + extern struct kmem_cache *proc_dir_entry_cache; void pde_free(struct proc_dir_entry *pde); diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c index a2d430549012..d4812f2f7712 100644 --- a/fs/proc/kcore.c +++ b/fs/proc/kcore.c @@ -496,13 +496,13 @@ read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) * the previous entry, search for a matching entry. 
*/ if (!m || start < m->addr || start >= m->addr + m->size) { - struct kcore_list *iter; + struct kcore_list *pos; m = NULL; - list_for_each_entry(iter, &kclist_head, list) { - if (start >= iter->addr && - start < iter->addr + iter->size) { - m = iter; + list_for_each_entry(pos, &kclist_head, list) { + if (start >= pos->addr && + start < pos->addr + pos->size) { + m = pos; break; } } @@ -585,6 +585,7 @@ read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) ret = -EFAULT; goto out; } + ret = 0; /* * We know the bounce buffer is safe to copy from, so * use _copy_to_iter() directly. diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c index f4616083faef..04bb29721419 100644 --- a/fs/proc/softirqs.c +++ b/fs/proc/softirqs.c @@ -20,7 +20,7 @@ static int show_softirqs(struct seq_file *p, void *v) for (i = 0; i < NR_SOFTIRQS; i++) { seq_printf(p, "%12s:", softirq_to_name[i]); for_each_possible_cpu(j) - seq_printf(p, " %10u", kstat_softirqs_cpu(i, j)); + seq_put_decimal_ull_width(p, " ", kstat_softirqs_cpu(i, j), 10); seq_putc(p, '\n'); } return 0; diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 147cc771d5a6..3f5a4a5e1a82 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -404,6 +404,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos) if (!iov_iter_count(iter)) return acc; } + + cond_resched(); } return acc; diff --git a/fs/pstore/blk.c b/fs/pstore/blk.c index 4ae0cfcd15f2..c6911c999762 100644 --- a/fs/pstore/blk.c +++ b/fs/pstore/blk.c @@ -89,7 +89,7 @@ static struct pstore_device_info *pstore_device_info; _##name_ = check_size(name, alignsize); \ else \ _##name_ = 0; \ - /* Synchronize module parameters with resuls. */ \ + /* Synchronize module parameters with results. */ \ name = _##name_ / 1024; \ dev->zone.name = _##name_; \ } @@ -121,7 +121,7 @@ static int __register_pstore_device(struct pstore_device_info *dev) if (pstore_device_info) return -EBUSY; - /* zero means not limit on which backends to attempt to store. */ + /* zero means no limit on which backends attempt to store. */ if (!dev->flags) dev->flags = UINT_MAX; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index f7ab6b44011b..0f82db69d2d8 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -690,6 +690,8 @@ int dquot_writeback_dquots(struct super_block *sb, int type) WARN_ON_ONCE(!rwsem_is_locked(&sb->s_umount)); + flush_delayed_work("a_release_work); + for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (type != -1 && cnt != type) continue; diff --git a/fs/select.c b/fs/select.c index d4d881d439dc..e66b6189845e 100644 --- a/fs/select.c +++ b/fs/select.c @@ -77,19 +77,16 @@ u64 select_estimate_accuracy(struct timespec64 *tv) { u64 ret; struct timespec64 now; + u64 slack = current->timer_slack_ns; - /* - * Realtime tasks get a slack of 0 for obvious reasons. 
- */ - - if (rt_task(current)) + if (slack == 0) return 0; ktime_get_ts64(&now); now = timespec64_sub(*tv, now); ret = __estimate_accuracy(&now); - if (ret < current->timer_slack_ns) - return current->timer_slack_ns; + if (ret < slack) + return slack; return ret; } @@ -788,7 +785,7 @@ static inline int get_sigset_argpack(struct sigset_argpack *to, } return 0; Efault: - user_access_end(); + user_read_access_end(); return -EFAULT; } @@ -1361,7 +1358,7 @@ static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to, } return 0; Efault: - user_access_end(); + user_read_access_end(); return -EFAULT; } diff --git a/fs/smb/client/asn1.c b/fs/smb/client/asn1.c index b5724ef9f182..214a44509e7b 100644 --- a/fs/smb/client/asn1.c +++ b/fs/smb/client/asn1.c @@ -52,6 +52,8 @@ int cifs_neg_token_init_mech_type(void *context, size_t hdrlen, server->sec_kerberos = true; else if (oid == OID_ntlmssp) server->sec_ntlmssp = true; + else if (oid == OID_IAKerb) + server->sec_iakerb = true; else { char buf[50]; diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c index 2ca1881919c7..d09226c1ac90 100644 --- a/fs/smb/client/cached_dir.c +++ b/fs/smb/client/cached_dir.c @@ -354,7 +354,7 @@ int open_cached_dir_by_dentry(struct cifs_tcon *tcon, spin_lock(&cfids->cfid_list_lock); list_for_each_entry(cfid, &cfids->entries, entry) { if (dentry && cfid->dentry == dentry) { - cifs_dbg(FYI, "found a cached root file handle by dentry\n"); + cifs_dbg(FYI, "found a cached file handle by dentry\n"); kref_get(&cfid->refcount); *ret_cfid = cfid; spin_unlock(&cfids->cfid_list_lock); diff --git a/fs/smb/client/cifs_dfs_ref.c b/fs/smb/client/cifs_dfs_ref.c index 020e71fe1454..876f9a43a99d 100644 --- a/fs/smb/client/cifs_dfs_ref.c +++ b/fs/smb/client/cifs_dfs_ref.c @@ -258,6 +258,31 @@ compose_mount_options_err: goto compose_mount_options_out; } +static int set_dest_addr(struct smb3_fs_context *ctx, const char *full_path) +{ + struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr; + char *str_addr = NULL; + int rc; + + rc = dns_resolve_server_name_to_ip(full_path, &str_addr, NULL); + if (rc < 0) + goto out; + + rc = cifs_convert_address(addr, str_addr, strlen(str_addr)); + if (!rc) { + cifs_dbg(FYI, "%s: failed to convert ip address\n", __func__); + rc = -EINVAL; + goto out; + } + + cifs_set_port(addr, ctx->port); + rc = 0; + +out: + kfree(str_addr); + return rc; +} + /* * Create a vfsmount that we can automount */ @@ -295,8 +320,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct path *path) ctx = smb3_fc2context(fc); page = alloc_dentry_path(); - /* always use tree name prefix */ - full_path = build_path_from_dentry_optional_prefix(mntpt, page, true); + full_path = dfs_get_automount_devname(mntpt, page); if (IS_ERR(full_path)) { mnt = ERR_CAST(full_path); goto out; @@ -315,6 +339,12 @@ static struct vfsmount *cifs_dfs_do_automount(struct path *path) goto out; } + rc = set_dest_addr(ctx, full_path); + if (rc) { + mnt = ERR_PTR(rc); + goto out; + } + rc = smb3_parse_devname(full_path, ctx); if (!rc) mnt = fc_mount(fc); diff --git a/fs/smb/client/cifs_spnego.c b/fs/smb/client/cifs_spnego.c index 1e6819daaaa7..8b58f494235f 100644 --- a/fs/smb/client/cifs_spnego.c +++ b/fs/smb/client/cifs_spnego.c @@ -130,11 +130,13 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo, dp = description + strlen(description); - /* for now, only sec=krb5 and sec=mskrb5 are valid */ + /* for now, only sec=krb5 and sec=mskrb5 and iakerb are valid */ if (server->sec_kerberos) sprintf(dp, ";sec=krb5"); else if 
(server->sec_mskerberos) sprintf(dp, ";sec=mskrb5"); + else if (server->sec_iakerb) + sprintf(dp, ";sec=iakerb"); else { cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n"); sprintf(dp, ";sec=krb5"); diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index 1d19c9e75277..17fce0afb297 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -148,6 +148,7 @@ enum securityEnum { NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */ RawNTLMSSP, /* NTLMSSP without SPNEGO, NTLMv2 hash */ Kerberos, /* Kerberos via SPNEGO */ + IAKerb, /* Kerberos proxy */ }; struct session_key { @@ -286,7 +287,7 @@ struct smb_version_operations { int (*handle_cancelled_mid)(struct mid_q_entry *, struct TCP_Server_Info *); void (*downgrade_oplock)(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache); + __u16 epoch, bool *purge_cache); /* process transaction2 response */ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *, char *, int); @@ -466,12 +467,12 @@ struct smb_version_operations { /* if we can do cache read operations */ bool (*is_read_op)(__u32); /* set oplock level for the inode */ - void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int, - bool *); + void (*set_oplock_level)(struct cifsInodeInfo *cinode, __u32 oplock, __u16 epoch, + bool *purge_cache); /* create lease context buffer for CREATE request */ char * (*create_lease_buf)(u8 *lease_key, u8 oplock); /* parse lease context buffer and return oplock/epoch info */ - __u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey); + __u8 (*parse_lease_buf)(void *buf, __u16 *epoch, char *lkey); ssize_t (*copychunk_range)(const unsigned int, struct cifsFileInfo *src_file, struct cifsFileInfo *target_file, @@ -685,6 +686,7 @@ struct TCP_Server_Info { bool sec_kerberosu2u; /* supports U2U Kerberos */ bool sec_kerberos; /* supports plain Kerberos */ bool sec_mskerberos; /* supports legacy MS Kerberos */ + bool sec_iakerb; /* supports pass-through auth for Kerberos (krb5 proxy) */ bool large_buf; /* is current buffer large? */ /* use SMBD connection instead of socket */ bool rdma; @@ -1328,7 +1330,7 @@ struct cifs_fid { __u8 create_guid[16]; __u32 access; struct cifs_pending_open *pending_open; - unsigned int epoch; + __u16 epoch; #ifdef CONFIG_CIFS_DEBUG2 __u64 mid; #endif /* CIFS_DEBUG2 */ @@ -1360,7 +1362,7 @@ struct cifsFileInfo { bool swapfile:1; bool oplock_break_cancelled:1; bool offload:1; /* offload final part of _put to a wq */ - unsigned int oplock_epoch; /* epoch from the lease break */ + __u16 oplock_epoch; /* epoch from the lease break */ __u32 oplock_level; /* oplock/lease level from the lease break */ int count; spinlock_t file_info_lock; /* protects four flag/count fields above */ @@ -1510,7 +1512,7 @@ struct cifsInodeInfo { spinlock_t open_file_lock; /* protects openFileList */ __u32 cifsAttrs; /* e.g. 
DOS archive bit, sparse, compressed, system */ unsigned int oplock; /* oplock/lease level we have */ - unsigned int epoch; /* used to track lease state changes */ + __u16 epoch; /* used to track lease state changes */ #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */ #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */ #define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */ @@ -2049,6 +2051,8 @@ static inline char *get_security_type_str(enum securityEnum sectype) return "Kerberos"; case NTLMv2: return "NTLMv2"; + case IAKerb: + return "IAKerb"; default: return "Unknown"; } diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index f37e4da0fe40..9a30425b75a9 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -30,6 +30,9 @@ extern void cifs_small_buf_release(void *); extern void free_rsp_buf(int, void *); extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *, unsigned int /* length */); +extern int smb_send_kvec(struct TCP_Server_Info *server, + struct msghdr *msg, + size_t *sent); extern unsigned int _get_xid(void); extern void _free_xid(unsigned int); #define get_xid() \ @@ -57,8 +60,29 @@ extern void exit_cifs_idmap(void); extern int init_cifs_spnego(void); extern void exit_cifs_spnego(void); extern const char *build_path_from_dentry(struct dentry *, void *); +char *__build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page, + const char *tree, int tree_len, + bool prefix); extern char *build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page, bool prefix); + +#ifdef CONFIG_CIFS_DFS_UPCALL +static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page) +{ + struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb); + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); + struct TCP_Server_Info *server = tcon->ses->server; + + if (unlikely(!server->origin_fullpath)) + return ERR_PTR(-EREMOTE); + + return __build_path_from_dentry_optional_prefix(dentry, page, + server->origin_fullpath, + strlen(server->origin_fullpath), + true); +} +#endif + static inline void *alloc_dentry_path(void) { return __getname(); @@ -152,6 +176,8 @@ extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool); extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, struct cifsFileInfo **ret_file); +extern int cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode, + struct file *file); extern unsigned int smbCalcSize(void *buf); extern int decode_negTokenInit(unsigned char *security_blob, int length, struct TCP_Server_Info *server); diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c index 67c5fc2b2db9..6077fe1dcc9c 100644 --- a/fs/smb/client/cifssmb.c +++ b/fs/smb/client/cifssmb.c @@ -5349,7 +5349,7 @@ SetTimesRetry: param_offset = offsetof(struct smb_com_transaction2_spi_req, InformationLevel) - 4; offset = param_offset + params; - data_offset = (char *) (&pSMB->hdr.Protocol) + offset; + data_offset = (char *)pSMB + offsetof(typeof(*pSMB), hdr.Protocol) + offset; pSMB->ParameterOffset = cpu_to_le16(param_offset); pSMB->DataOffset = cpu_to_le16(offset); pSMB->SetupCount = 1; diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c index 87ce71b39b77..76c04c4ed45f 100644 --- a/fs/smb/client/connect.c +++ b/fs/smb/client/connect.c @@ -259,7 +259,13 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server, 
spin_lock(&cifs_tcp_ses_lock); list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) { - /* check if iface is still active */ + spin_lock(&ses->ses_lock); + if (ses->ses_status == SES_EXITING) { + spin_unlock(&ses->ses_lock); + continue; + } + spin_unlock(&ses->ses_lock); + spin_lock(&ses->chan_lock); if (!cifs_chan_is_iface_active(ses, server)) { spin_unlock(&ses->chan_lock); @@ -1875,9 +1881,8 @@ out_err: /* this function must be called with ses_lock and chan_lock held */ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx) { - if (ctx->sectype != Unspecified && - ctx->sectype != ses->sectype) - return 0; + struct TCP_Server_Info *server = ses->server; + enum securityEnum ctx_sec, ses_sec; /* * If an existing session is limited to less channels than @@ -1886,11 +1891,20 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx) if (ses->chan_max < ctx->max_channels) return 0; - switch (ses->sectype) { + ctx_sec = server->ops->select_sectype(server, ctx->sectype); + ses_sec = server->ops->select_sectype(server, ses->sectype); + + if (ctx_sec != ses_sec) + return 0; + + switch (ctx_sec) { + case IAKerb: case Kerberos: if (!uid_eq(ctx->cred_uid, ses->cred_uid)) return 0; break; + case NTLMv2: + case RawNTLMSSP: default: /* NULL username means anonymous session */ if (ses->user_name == NULL) { @@ -1977,31 +1991,6 @@ out: return rc; } -/** - * cifs_free_ipc - helper to release the session IPC tcon - * @ses: smb session to unmount the IPC from - * - * Needs to be called everytime a session is destroyed. - * - * On session close, the IPC is closed and the server must release all tcons of the session. - * No need to send a tree disconnect here. - * - * Besides, it will make the server to not close durable and resilient files on session close, as - * specified in MS-SMB2 3.3.5.6 Receiving an SMB2 LOGOFF Request. - */ -static int -cifs_free_ipc(struct cifs_ses *ses) -{ - struct cifs_tcon *tcon = ses->tcon_ipc; - - if (tcon == NULL) - return 0; - - tconInfoFree(tcon); - ses->tcon_ipc = NULL; - return 0; -} - static struct cifs_ses * cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) { @@ -2035,35 +2024,44 @@ void cifs_put_smb_ses(struct cifs_ses *ses) { unsigned int rc, xid; unsigned int chan_count; + bool do_logoff; + struct cifs_tcon *tcon; struct TCP_Server_Info *server = ses->server; - spin_lock(&ses->ses_lock); - if (ses->ses_status == SES_EXITING) { - spin_unlock(&ses->ses_lock); - return; - } - spin_unlock(&ses->ses_lock); - - cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count); - cifs_dbg(FYI, - "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? ses->tcon_ipc->tree_name : "NONE"); - spin_lock(&cifs_tcp_ses_lock); - if (--ses->ses_count > 0) { + spin_lock(&ses->ses_lock); + cifs_dbg(FYI, "%s: id=0x%llx ses_count=%d ses_status=%u ipc=%s\n", + __func__, ses->Suid, ses->ses_count, ses->ses_status, + ses->tcon_ipc ? 
ses->tcon_ipc->tree_name : "none"); + if (ses->ses_status == SES_EXITING || --ses->ses_count > 0) { + spin_unlock(&ses->ses_lock); spin_unlock(&cifs_tcp_ses_lock); return; } - spin_unlock(&cifs_tcp_ses_lock); - /* ses_count can never go negative */ WARN_ON(ses->ses_count < 0); - if (ses->ses_status == SES_GOOD) - ses->ses_status = SES_EXITING; + spin_lock(&ses->chan_lock); + cifs_chan_clear_need_reconnect(ses, server); + spin_unlock(&ses->chan_lock); - cifs_free_ipc(ses); + do_logoff = ses->ses_status == SES_GOOD && server->ops->logoff; + ses->ses_status = SES_EXITING; + tcon = ses->tcon_ipc; + ses->tcon_ipc = NULL; + spin_unlock(&ses->ses_lock); + spin_unlock(&cifs_tcp_ses_lock); - if (ses->ses_status == SES_EXITING && server->ops->logoff) { + /* + * On session close, the IPC is closed and the server must release all + * tcons of the session. No need to send a tree disconnect here. + * + * Besides, it will make the server to not close durable and resilient + * files on session close, as specified in MS-SMB2 3.3.5.6 Receiving an + * SMB2 LOGOFF Request. + */ + tconInfoFree(tcon); + if (do_logoff) { xid = get_xid(); rc = server->ops->logoff(xid, ses); if (rc) @@ -2419,6 +2417,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) return 0; if (tcon->nodelete != ctx->nodelete) return 0; + if (tcon->posix_extensions != ctx->linux_ext) + return 0; return 1; } @@ -2966,8 +2966,10 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) * sessinit is sent but no second negprot */ struct rfc1002_session_packet req = {}; - struct smb_hdr *smb_buf = (struct smb_hdr *)&req; + struct msghdr msg = {}; + struct kvec iov = {}; unsigned int len; + size_t sent; req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name); @@ -2996,10 +2998,18 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) * As per rfc1002, @len must be the number of bytes that follows the * length field of a rfc1002 session request payload. */ - len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req); + len = sizeof(req.trailer.session_req); + req.type = RFC1002_SESSION_REQUEST; + req.flags = 0; + req.length = cpu_to_be16(len); + len += offsetof(typeof(req), trailer.session_req); + iov.iov_base = &req; + iov.iov_len = len; + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, len); + rc = smb_send_kvec(server, &msg, &sent); + if (rc < 0 || len != sent) + return (rc == -EINTR || rc == -EAGAIN) ? rc : -ECONNABORTED; - smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len); - rc = smb_send(server, smb_buf, len); /* * RFC1001 layer in at least one server requires very short break before * negprot presumably because not expecting negprot to follow so fast. 
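Note on the ip_rfc1001_connect() rework above: instead of abusing an smb_hdr to carry the RFC 1002 length, the code now fills in the session-request header explicitly (type, flags, 16-bit big-endian length of the trailer only) and pushes header plus trailer through smb_send_kvec() as a single kvec. The following is a minimal, userspace-style sketch of that framing; the struct layout, name handling and helper names are illustrative stand-ins, not the kernel's rfc1002_session_packet.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htons()/ntohs() */

#define RFC1002_SESSION_REQUEST 0x81
#define RFC1002_NAME_LEN        34   /* length byte + 32 encoded bytes + terminator */

struct rfc1002_sess_req {
	uint8_t  type;                        /* 0x81: session request */
	uint8_t  flags;                       /* 0 unless the 17th length bit is needed */
	uint16_t length;                      /* big-endian, counts bytes AFTER this header */
	uint8_t  called_name[RFC1002_NAME_LEN];
	uint8_t  calling_name[RFC1002_NAME_LEN];
} __attribute__((packed));

static size_t build_session_request(struct rfc1002_sess_req *req,
				    const uint8_t *called, const uint8_t *calling)
{
	size_t trailer_len = sizeof(req->called_name) + sizeof(req->calling_name);

	memset(req, 0, sizeof(*req));
	req->type   = RFC1002_SESSION_REQUEST;
	req->flags  = 0;
	req->length = htons((uint16_t)trailer_len);   /* trailer only, not the header */
	memcpy(req->called_name, called, sizeof(req->called_name));
	memcpy(req->calling_name, calling, sizeof(req->calling_name));

	/* total bytes to hand to the socket: 4-byte header + trailer */
	return offsetof(struct rfc1002_sess_req, called_name) + trailer_len;
}

int main(void)
{
	/* placeholder names; real code would second-level encode the NetBIOS names */
	uint8_t called[RFC1002_NAME_LEN] = { 0x20 }, calling[RFC1002_NAME_LEN] = { 0x20 };
	struct rfc1002_sess_req req;
	size_t total = build_session_request(&req, called, calling);

	printf("sending %zu bytes (%u trailer bytes)\n", total, (unsigned)ntohs(req.length));
	return 0;
}

The detail mirrored from the hunk above is that the length field counts only the bytes that follow the 4-byte header, which is why the kernel code sets req.length from sizeof(req.trailer.session_req) and only then adds the header offset to compute the number of bytes actually sent.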
@@ -3008,7 +3018,7 @@ ip_rfc1001_connect(struct TCP_Server_Info *server) */ usleep_range(1000, 2000); - return rc; + return 0; } static int @@ -4178,11 +4188,13 @@ int cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, struct TCP_Server_Info *server) { + bool in_retry = false; int rc = 0; if (!server->ops->need_neg || !server->ops->negotiate) return -ENOSYS; +retry: /* only send once per connect */ spin_lock(&server->srv_lock); if (server->tcpStatus != CifsGood && @@ -4202,6 +4214,14 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, spin_unlock(&server->srv_lock); rc = server->ops->negotiate(xid, ses, server); + if (rc == -EAGAIN) { + /* Allow one retry attempt */ + if (!in_retry) { + in_retry = true; + goto retry; + } + rc = -EHOSTDOWN; + } if (rc == 0) { spin_lock(&server->srv_lock); if (server->tcpStatus == CifsInNegotiate) diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c index 863c7bc3db86..477302157ab3 100644 --- a/fs/smb/client/dir.c +++ b/fs/smb/client/dir.c @@ -78,14 +78,13 @@ build_path_from_dentry(struct dentry *direntry, void *page) prefix); } -char * -build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page, - bool prefix) +char *__build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page, + const char *tree, int tree_len, + bool prefix) { int dfsplen; int pplen = 0; struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); - struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); char dirsep = CIFS_DIR_SEP(cifs_sb); char *s; @@ -93,7 +92,7 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page, return ERR_PTR(-ENOMEM); if (prefix) - dfsplen = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1); + dfsplen = strnlen(tree, tree_len + 1); else dfsplen = 0; @@ -123,7 +122,7 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page, } if (dfsplen) { s -= dfsplen; - memcpy(s, tcon->tree_name, dfsplen); + memcpy(s, tree, dfsplen); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { int i; for (i = 0; i < dfsplen; i++) { @@ -135,6 +134,16 @@ build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page, return s; } +char *build_path_from_dentry_optional_prefix(struct dentry *direntry, void *page, + bool prefix) +{ + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); + + return __build_path_from_dentry_optional_prefix(direntry, page, tcon->tree_name, + MAX_TREE_SIZE, prefix); +} + /* * Don't allow path components longer than the server max. * Don't allow the separator character in a path component. 
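The cifs_negotiate_protocol() hunk above centralizes the -EAGAIN handling that the per-dialect negotiate callbacks used to do themselves (their copies are removed further down in smb1ops.c and smb2ops.c): one retry is allowed, after which the error is turned into -EHOSTDOWN. Below is a self-contained sketch of that retry-once shape, with a trivial stand-in for server->ops->negotiate(); it assumes nothing beyond standard errno values.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for server->ops->negotiate(): fails once with -EAGAIN, then succeeds. */
static int do_negotiate(void)
{
	static int calls;

	return (calls++ == 0) ? -EAGAIN : 0;
}

static int negotiate_with_one_retry(void)
{
	bool in_retry = false;
	int rc;

retry:
	rc = do_negotiate();
	if (rc == -EAGAIN) {
		if (!in_retry) {
			in_retry = true;
			goto retry;          /* allow exactly one retry attempt */
		}
		rc = -EHOSTDOWN;             /* still failing: report a hard error */
	}
	return rc;
}

int main(void)
{
	printf("negotiate rc = %d\n", negotiate_with_one_retry());
	return 0;
}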
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index d23dfc83de50..9b0919d9e337 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -687,6 +687,11 @@ int cifs_open(struct inode *inode, struct file *file) } else { _cifsFileInfo_put(cfile, true, false); } + } else { + /* hard link on the defeered close file */ + rc = cifs_get_hardlink_path(tcon, inode, file); + if (rc) + cifs_close_deferred_file(CIFS_I(inode)); } if (server->oplocks) @@ -1735,6 +1740,29 @@ cifs_move_llist(struct list_head *source, struct list_head *dest) list_move(li, dest); } +int +cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode, + struct file *file) +{ + struct cifsFileInfo *open_file = NULL; + struct cifsInodeInfo *cinode = CIFS_I(inode); + int rc = 0; + + spin_lock(&tcon->open_file_lock); + spin_lock(&cinode->open_file_lock); + + list_for_each_entry(open_file, &cinode->openFileList, flist) { + if (file->f_flags == open_file->f_flags) { + rc = -EINVAL; + break; + } + } + + spin_unlock(&cinode->open_file_lock); + spin_unlock(&tcon->open_file_lock); + return rc; +} + void cifs_free_llist(struct list_head *llist) { diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c index ca39d01077cd..e6d2e4162b08 100644 --- a/fs/smb/client/fs_context.c +++ b/fs/smb/client/fs_context.c @@ -1078,21 +1078,21 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, ctx->got_wsize = true; break; case Opt_acregmax: - ctx->acregmax = HZ * result.uint_32; - if (ctx->acregmax > CIFS_MAX_ACTIMEO) { + if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) { cifs_errorf(fc, "acregmax too large\n"); goto cifs_parse_mount_err; } + ctx->acregmax = HZ * result.uint_32; break; case Opt_acdirmax: - ctx->acdirmax = HZ * result.uint_32; - if (ctx->acdirmax > CIFS_MAX_ACTIMEO) { + if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) { cifs_errorf(fc, "acdirmax too large\n"); goto cifs_parse_mount_err; } + ctx->acdirmax = HZ * result.uint_32; break; case Opt_actimeo: - if (HZ * result.uint_32 > CIFS_MAX_ACTIMEO) { + if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) { cifs_errorf(fc, "timeout too large\n"); goto cifs_parse_mount_err; } @@ -1104,13 +1104,18 @@ static int smb3_fs_context_parse_param(struct fs_context *fc, ctx->acdirmax = ctx->acregmax = HZ * result.uint_32; break; case Opt_closetimeo: - ctx->closetimeo = HZ * result.uint_32; - if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) { + if (result.uint_32 > SMB3_MAX_DCLOSETIMEO / HZ) { cifs_errorf(fc, "closetimeo too large\n"); goto cifs_parse_mount_err; } + ctx->closetimeo = HZ * result.uint_32; break; case Opt_echo_interval: + if (result.uint_32 < SMB_ECHO_INTERVAL_MIN || + result.uint_32 > SMB_ECHO_INTERVAL_MAX) { + cifs_errorf(fc, "echo interval is out of bounds\n"); + goto cifs_parse_mount_err; + } ctx->echo_interval = result.uint_32; break; case Opt_snapshot: diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c index ae9905e2b9d4..7402070b7a06 100644 --- a/fs/smb/client/ioctl.c +++ b/fs/smb/client/ioctl.c @@ -246,7 +246,9 @@ static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server_it, &cifs_tcp_ses_list, tcp_ses_list) { list_for_each_entry(ses_it, &server_it->smb_ses_list, smb_ses_list) { - if (ses_it->Suid == out.session_id) { + spin_lock(&ses_it->ses_lock); + if (ses_it->ses_status != SES_EXITING && + ses_it->Suid == out.session_id) { ses = ses_it; /* * since we are using the session outside the crit @@ -254,9 +256,11 @@ static int cifs_dump_full_key(struct cifs_tcon *tcon, 
struct smb3_full_key_debug * so increment its refcount */ ses->ses_count++; + spin_unlock(&ses_it->ses_lock); found = true; goto search_end; } + spin_unlock(&ses_it->ses_lock); } } search_end: diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c index c0f101fc1e5d..d71feb3fdbd2 100644 --- a/fs/smb/client/link.c +++ b/fs/smb/client/link.c @@ -269,7 +269,7 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, struct cifs_open_parms oparms; struct cifs_io_parms io_parms = {0}; int buf_type = CIFS_NO_BUFFER; - FILE_ALL_INFO file_info; + struct cifs_open_info_data query_data; oparms = (struct cifs_open_parms) { .tcon = tcon, @@ -281,11 +281,11 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, .fid = &fid, }; - rc = CIFS_open(xid, &oparms, &oplock, &file_info); + rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &query_data); if (rc) return rc; - if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { + if (query_data.fi.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { rc = -ENOENT; /* it's not a symlink */ goto out; @@ -324,7 +324,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, .fid = &fid, }; - rc = CIFS_open(xid, &oparms, &oplock, NULL); + rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, NULL); if (rc) return rc; diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c index 9a1f1913fb59..402ac06ae9b6 100644 --- a/fs/smb/client/readdir.c +++ b/fs/smb/client/readdir.c @@ -765,7 +765,10 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, else cifs_buf_release(cfile->srch_inf. ntwrk_buf_start); + /* Reset all pointers to the network buffer to prevent stale references */ cfile->srch_inf.ntwrk_buf_start = NULL; + cfile->srch_inf.srch_entries_start = NULL; + cfile->srch_inf.last_entry = NULL; } rc = initiate_cifs_search(xid, file, full_path); if (rc) { @@ -788,11 +791,11 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, rc = server->ops->query_dir_next(xid, tcon, &cfile->fid, search_flags, &cfile->srch_inf); + if (rc) + return -ENOENT; /* FindFirst/Next set last_entry to NULL on malformed reply */ if (cfile->srch_inf.last_entry) cifs_save_resume_key(cfile->srch_inf.last_entry, cfile); - if (rc) - return -ENOENT; } if (index_to_find < cfile->srch_inf.index_of_last_entry) { /* we found the buffer that contains the entry */ diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c index b8e14bcd2c68..c8f7ae0a2006 100644 --- a/fs/smb/client/sess.c +++ b/fs/smb/client/sess.c @@ -1209,12 +1209,13 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) switch (requested) { case Kerberos: case RawNTLMSSP: + case IAKerb: return requested; case Unspecified: if (server->sec_ntlmssp && (global_secflags & CIFSSEC_MAY_NTLMSSP)) return RawNTLMSSP; - if ((server->sec_kerberos || server->sec_mskerberos) && + if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; fallthrough; diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c index 96c0d2682ba2..1489b9d21b60 100644 --- a/fs/smb/client/smb1ops.c +++ b/fs/smb/client/smb1ops.c @@ -377,7 +377,7 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr) static void cifs_downgrade_oplock(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { cifs_set_oplock_level(cinode, oplock); } @@ -426,13 +426,6 @@ 
cifs_negotiate(const unsigned int xid, { int rc; rc = CIFSSMBNegotiate(xid, ses, server); - if (rc == -EAGAIN) { - /* retry only once on 1st time connection */ - set_credits(server, 1); - rc = CIFSSMBNegotiate(xid, ses, server); - if (rc == -EAGAIN) - rc = -EHOSTDOWN; - } return rc; } diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c index a7475bc05cac..afdc78e92ee9 100644 --- a/fs/smb/client/smb2file.c +++ b/fs/smb/client/smb2file.c @@ -108,16 +108,25 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 int err_buftype = CIFS_NO_BUFFER; struct cifs_fid *fid = oparms->fid; struct network_resiliency_req nr_ioctl_req; + bool retry_without_read_attributes = false; smb2_path = cifs_convert_path_to_utf16(oparms->path, oparms->cifs_sb); if (smb2_path == NULL) return -ENOMEM; - oparms->desired_access |= FILE_READ_ATTRIBUTES; + if (!(oparms->desired_access & FILE_READ_ATTRIBUTES)) { + oparms->desired_access |= FILE_READ_ATTRIBUTES; + retry_without_read_attributes = true; + } smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH; rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov, &err_buftype); + if (rc == -EACCES && retry_without_read_attributes) { + oparms->desired_access &= ~FILE_READ_ATTRIBUTES; + rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov, + &err_buftype); + } if (rc && data) { struct smb2_hdr *hdr = err_iov.iov_base; diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c index 8c149cb531d3..01352d7c10f1 100644 --- a/fs/smb/client/smb2misc.c +++ b/fs/smb/client/smb2misc.c @@ -814,11 +814,12 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid, WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative"); spin_unlock(&cifs_tcp_ses_lock); - if (tcon->ses) + if (tcon->ses) { server = tcon->ses->server; - - cifs_server_dbg(FYI, "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n", - tcon->tid, persistent_fid, volatile_fid); + cifs_server_dbg(FYI, + "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n", + tcon->tid, persistent_fid, volatile_fid); + } return 0; } diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index d1e5ff9a3cd3..c9b9892b510e 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -422,9 +422,6 @@ smb2_negotiate(const unsigned int xid, server->CurrentMid = 0; spin_unlock(&server->mid_lock); rc = SMB2_negotiate(xid, ses, server); - /* BB we probably don't need to retry with modern servers */ - if (rc == -EAGAIN) - rc = -EHOSTDOWN; return rc; } @@ -615,7 +612,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, while (bytes_left >= (ssize_t)sizeof(*p)) { memset(&tmp_iface, 0, sizeof(tmp_iface)); - tmp_iface.speed = le64_to_cpu(p->LinkSpeed); + /* default to 1Gbps when link speed is unset */ + tmp_iface.speed = le64_to_cpu(p->LinkSpeed) ?: 1000000000; tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0; tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 
1 : 0; @@ -2897,6 +2895,12 @@ parse_reparse_posix(struct reparse_posix_data *symlink_buf, /* See MS-FSCC 2.1.2.6 for the 'NFS' style reparse tags */ len = le16_to_cpu(symlink_buf->ReparseDataLength); + if (len < sizeof(symlink_buf->InodeType)) { + cifs_dbg(VFS, "srv returned malformed nfs buffer\n"); + return -EIO; + } + + len -= sizeof(symlink_buf->InodeType); if (le64_to_cpu(symlink_buf->InodeType) != NFS_SPECFILE_LNK) { cifs_dbg(VFS, "%lld not a supported symlink type\n", @@ -4104,22 +4108,22 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode, static void smb2_downgrade_oplock(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { server->ops->set_oplock_level(cinode, oplock, 0, NULL); } static void smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache); + __u16 epoch, bool *purge_cache); static void smb3_downgrade_oplock(struct TCP_Server_Info *server, struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { unsigned int old_state = cinode->oplock; - unsigned int old_epoch = cinode->epoch; + __u16 old_epoch = cinode->epoch; unsigned int new_state; if (epoch > old_epoch) { @@ -4139,7 +4143,7 @@ smb3_downgrade_oplock(struct TCP_Server_Info *server, static void smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { oplock &= 0xFF; cinode->lease_granted = false; @@ -4163,7 +4167,7 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, static void smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { char message[5] = {0}; unsigned int new_oplock = 0; @@ -4200,7 +4204,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, static void smb3_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, - unsigned int epoch, bool *purge_cache) + __u16 epoch, bool *purge_cache) { unsigned int old_oplock = cinode->oplock; @@ -4254,7 +4258,7 @@ map_oplock_to_lease(u8 oplock) if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) return SMB2_LEASE_WRITE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE; else if (oplock == SMB2_OPLOCK_LEVEL_II) - return SMB2_LEASE_READ_CACHING_LE; + return SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_HANDLE_CACHING_LE; else if (oplock == SMB2_OPLOCK_LEVEL_BATCH) return SMB2_LEASE_HANDLE_CACHING_LE | SMB2_LEASE_READ_CACHING_LE | SMB2_LEASE_WRITE_CACHING_LE; @@ -4314,7 +4318,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock) } static __u8 -smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) +smb2_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key) { struct create_lease *lc = (struct create_lease *)buf; @@ -4325,7 +4329,7 @@ smb2_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) } static __u8 -smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key) +smb3_parse_lease_buf(void *buf, __u16 *epoch, char *lease_key) { struct create_lease_v2 *lc = (struct create_lease_v2 *)buf; @@ -4482,7 +4486,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) */ static int crypt_message(struct TCP_Server_Info *server, int num_rqst, - struct smb_rqst *rqst, int enc) + struct smb_rqst *rqst, int enc, struct crypto_aead *tfm) { struct smb2_transform_hdr *tr_hdr = (struct 
smb2_transform_hdr *)rqst[0].rq_iov[0].iov_base; @@ -4493,8 +4497,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, u8 key[SMB3_ENC_DEC_KEY_SIZE]; struct aead_request *req; u8 *iv; - DECLARE_CRYPTO_WAIT(wait); - struct crypto_aead *tfm; unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize); void *creq; @@ -4505,14 +4507,6 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, return rc; } - rc = smb3_crypto_aead_allocate(server); - if (rc) { - cifs_server_dbg(VFS, "%s: crypto alloc failed\n", __func__); - return rc; - } - - tfm = enc ? server->secmech.enc : server->secmech.dec; - if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) || (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE); @@ -4551,11 +4545,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst, aead_request_set_crypt(req, sg, sg, crypt_len, iv); aead_request_set_ad(req, assoc_data_len); - aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - crypto_req_done, &wait); - - rc = crypto_wait_req(enc ? crypto_aead_encrypt(req) - : crypto_aead_decrypt(req), &wait); + rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req); if (!rc && enc) memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE); @@ -4644,7 +4634,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst, /* fill the 1st iov with a transform header */ fill_transform_hdr(tr_hdr, orig_len, old_rq, server->cipher_type); - rc = crypt_message(server, num_rqst, new_rq, 1); + rc = crypt_message(server, num_rqst, new_rq, 1, server->secmech.enc); cifs_dbg(FYI, "Encrypt message returned %d\n", rc); if (rc) goto err_free; @@ -4670,8 +4660,9 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf, unsigned int npages, unsigned int page_data_size, bool is_offloaded) { - struct kvec iov[2]; + struct crypto_aead *tfm; struct smb_rqst rqst = {NULL}; + struct kvec iov[2]; int rc; iov[0].iov_base = buf; @@ -4686,9 +4677,31 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf, rqst.rq_pagesz = PAGE_SIZE; rqst.rq_tailsz = (page_data_size % PAGE_SIZE) ? : PAGE_SIZE; - rc = crypt_message(server, 1, &rqst, 0); + if (is_offloaded) { + if ((server->cipher_type == SMB2_ENCRYPTION_AES128_GCM) || + (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) + tfm = crypto_alloc_aead("gcm(aes)", 0, 0); + else + tfm = crypto_alloc_aead("ccm(aes)", 0, 0); + if (IS_ERR(tfm)) { + rc = PTR_ERR(tfm); + cifs_server_dbg(VFS, "%s: Failed alloc decrypt TFM, rc=%d\n", __func__, rc); + + return rc; + } + } else { + if (unlikely(!server->secmech.dec)) + return -EIO; + + tfm = server->secmech.dec; + } + + rc = crypt_message(server, 1, &rqst, 0, tfm); cifs_dbg(FYI, "Decrypt message returned %d\n", rc); + if (is_offloaded) + crypto_free_aead(tfm); + if (rc) return rc; @@ -5142,6 +5155,10 @@ one_more: next_buffer = (char *)cifs_buf_get(); else next_buffer = (char *)cifs_small_buf_get(); + if (!next_buffer) { + cifs_server_dbg(VFS, "No memory for (large) SMB response\n"); + return -1; + } memcpy(next_buffer, buf + next_cmd, pdu_length - next_cmd); } diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c index 9975711236b2..c28e39c67c4f 100644 --- a/fs/smb/client/smb2pdu.c +++ b/fs/smb/client/smb2pdu.c @@ -1070,7 +1070,9 @@ SMB2_negotiate(const unsigned int xid, * SMB3.0 supports only 1 cipher and doesn't have a encryption neg context * Set the cipher type manually. 
*/ - if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) + if ((server->dialect == SMB30_PROT_ID || + server->dialect == SMB302_PROT_ID) && + (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION)) server->cipher_type = SMB2_ENCRYPTION_AES128_CCM; security_blob = smb2_get_data_area_len(&blob_offset, &blob_length, @@ -1105,6 +1107,12 @@ SMB2_negotiate(const unsigned int xid, else cifs_server_dbg(VFS, "Missing expected negotiate contexts\n"); } + + if (server->cipher_type && !rc) { + rc = smb3_crypto_aead_allocate(server); + if (rc) + cifs_server_dbg(VFS, "%s: crypto alloc failed, rc=%d\n", __func__, rc); + } neg_exit: free_rsp_buf(resp_buftype, rsp); return rc; @@ -1262,7 +1270,7 @@ smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) if (server->sec_ntlmssp && (global_secflags & CIFSSEC_MAY_NTLMSSP)) return RawNTLMSSP; - if ((server->sec_kerberos || server->sec_mskerberos) && + if ((server->sec_kerberos || server->sec_mskerberos || server->sec_iakerb) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; fallthrough; @@ -1991,7 +1999,7 @@ tcon_exit: tcon_error_exit: if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) - cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); + cifs_dbg(VFS | ONCE, "BAD_NETWORK_NAME: %s\n", tree); goto tcon_exit; } @@ -2150,7 +2158,7 @@ parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info, int smb2_parse_contexts(struct TCP_Server_Info *server, struct kvec *rsp_iov, - unsigned int *epoch, + __u16 *epoch, char *lease_key, __u8 *oplock, struct smb2_file_all_info *buf, struct create_posix_rsp *posix) @@ -2818,7 +2826,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode, /* Eventually save off posix specific response info and timestaps */ err_free_rsp_buf: - free_rsp_buf(resp_buftype, rsp); + free_rsp_buf(resp_buftype, rsp_iov.iov_base); kfree(pc_buf); err_free_req: cifs_small_buf_release(req); diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h index b325fde010ad..79e5e7764c3d 100644 --- a/fs/smb/client/smb2proto.h +++ b/fs/smb/client/smb2proto.h @@ -251,7 +251,7 @@ extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *, enum securityEnum); int smb2_parse_contexts(struct TCP_Server_Info *server, struct kvec *rsp_iov, - unsigned int *epoch, + __u16 *epoch, char *lease_key, __u8 *oplock, struct smb2_file_all_info *buf, struct create_posix_rsp *posix); diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c index 3fdafb9297f1..d2867bd263c5 100644 --- a/fs/smb/client/transport.c +++ b/fs/smb/client/transport.c @@ -178,7 +178,7 @@ delete_mid(struct mid_q_entry *mid) * Our basic "send data to server" function. Should be called with srv_mutex * held. The caller is responsible for handling the results. 
*/ -static int +int smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, size_t *sent) { diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c index 229a6527870d..f8a192cc82f2 100644 --- a/fs/smb/server/auth.c +++ b/fs/smb/server/auth.c @@ -544,7 +544,19 @@ int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob, retval = -ENOMEM; goto out; } - sess->user = user; + + if (!sess->user) { + /* First successful authentication */ + sess->user = user; + } else { + if (!ksmbd_compare_user(sess->user, user)) { + ksmbd_debug(AUTH, "different user tried to reuse session\n"); + retval = -EPERM; + ksmbd_free_user(user); + goto out; + } + ksmbd_free_user(user); + } memcpy(sess->sess_key, resp->payload, resp->session_key_len); memcpy(out_blob, resp->payload + resp->session_key_len, @@ -1011,6 +1023,8 @@ static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id, ses_enc_key = enc ? sess->smb3encryptionkey : sess->smb3decryptionkey; memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); + if (!enc) + ksmbd_user_session_put(sess); return 0; } @@ -1209,7 +1223,7 @@ free_iv: free_sg: kfree(sg); free_req: - kfree(req); + aead_request_free(req); free_ctx: ksmbd_release_crypto_ctx(ctx); return rc; diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c index 710ac7b976fa..f00fa7760412 100644 --- a/fs/smb/server/mgmt/user_session.c +++ b/fs/smb/server/mgmt/user_session.c @@ -180,7 +180,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn) down_write(&sessions_table_lock); down_write(&conn->session_lock); xa_for_each(&conn->sessions, id, sess) { - if (atomic_read(&sess->refcnt) == 0 && + if (atomic_read(&sess->refcnt) <= 1 && (sess->state != SMB2_SESSION_VALID || time_after(jiffies, sess->last_active + SMB2_SESSION_TIMEOUT))) { @@ -229,7 +229,11 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) if (!ksmbd_chann_del(conn, sess) && xa_empty(&sess->ksmbd_chann_list)) { hash_del(&sess->hlist); - ksmbd_session_destroy(sess); + down_write(&conn->session_lock); + xa_erase(&conn->sessions, sess->id); + up_write(&conn->session_lock); + if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); } } } @@ -248,13 +252,30 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn) if (xa_empty(&sess->ksmbd_chann_list)) { xa_erase(&conn->sessions, sess->id); hash_del(&sess->hlist); - ksmbd_session_destroy(sess); + if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); } } up_write(&conn->session_lock); up_write(&sessions_table_lock); } +bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn, + unsigned long long id) +{ + struct ksmbd_session *sess; + + down_read(&conn->session_lock); + sess = xa_load(&conn->sessions, id); + if (sess) { + up_read(&conn->session_lock); + return true; + } + up_read(&conn->session_lock); + + return false; +} + struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, unsigned long long id) { @@ -262,8 +283,10 @@ struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, down_read(&conn->session_lock); sess = xa_load(&conn->sessions, id); - if (sess) + if (sess) { sess->last_active = jiffies; + ksmbd_user_session_get(sess); + } up_read(&conn->session_lock); return sess; } @@ -274,6 +297,8 @@ struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id) down_read(&sessions_table_lock); sess = __session_lookup(id); + if (sess) + ksmbd_user_session_get(sess); up_read(&sessions_table_lock); return sess; @@ -304,8 +329,8 @@ void 
ksmbd_user_session_put(struct ksmbd_session *sess) if (atomic_read(&sess->refcnt) <= 0) WARN_ON(1); - else - atomic_dec(&sess->refcnt); + else if (atomic_dec_and_test(&sess->refcnt)) + ksmbd_session_destroy(sess); } struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn, @@ -376,7 +401,7 @@ static struct ksmbd_session *__session_create(int protocol) xa_init(&sess->rpc_handle_list); sess->sequence_number = 1; rwlock_init(&sess->tree_conns_lock); - atomic_set(&sess->refcnt, 1); + atomic_set(&sess->refcnt, 2); ret = __init_smb2_session(sess); if (ret) diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h index ce91b1d698e7..f4da293c4dbb 100644 --- a/fs/smb/server/mgmt/user_session.h +++ b/fs/smb/server/mgmt/user_session.h @@ -87,6 +87,8 @@ void ksmbd_session_destroy(struct ksmbd_session *sess); struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id); struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn, unsigned long long id); +bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn, + unsigned long long id); int ksmbd_session_register(struct ksmbd_conn *conn, struct ksmbd_session *sess); void ksmbd_sessions_deregister(struct ksmbd_conn *conn); diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c index b29e78b517bf..258ed9978b90 100644 --- a/fs/smb/server/oplock.c +++ b/fs/smb/server/oplock.c @@ -1515,7 +1515,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease) * @open_req: buffer containing smb2 file open(create) request * @is_dir: whether leasing file is directory * - * Return: oplock state, -ENOENT if create lease context not found + * Return: allocated lease context object on success, otherwise NULL */ struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir) { @@ -1534,6 +1534,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir) if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) { struct create_lease_v2 *lc = (struct create_lease_v2 *)cc; + if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < + sizeof(struct create_lease_v2) - 4) + goto err_out; + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); if (is_dir) { lreq->req_state = lc->lcontext.LeaseState & @@ -1551,6 +1555,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir) } else { struct create_lease *lc = (struct create_lease *)cc; + if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) < + sizeof(struct create_lease)) + goto err_out; + memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE); lreq->req_state = lc->lcontext.LeaseState; lreq->flags = lc->lcontext.LeaseFlags; @@ -1558,6 +1566,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req, bool is_dir) lreq->version = 1; } return lreq; +err_out: + kfree(lreq); + return NULL; } /** diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c index da5b9678ad05..27d8d6c6fdac 100644 --- a/fs/smb/server/server.c +++ b/fs/smb/server/server.c @@ -241,14 +241,14 @@ send: if (work->tcon) ksmbd_tree_connect_put(work->tcon); smb3_preauth_hash_rsp(work); - if (work->sess) - ksmbd_user_session_put(work->sess); if (work->sess && work->sess->enc && work->encrypted && conn->ops->encrypt_resp) { rc = conn->ops->encrypt_resp(work); if (rc < 0) conn->ops->set_rsp_status(work, STATUS_DATA_ERROR); } + if (work->sess) + ksmbd_user_session_put(work->sess); ksmbd_conn_write(work); } diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c index 
03dded29a980..727cb49926ee 100644 --- a/fs/smb/server/smb2misc.c +++ b/fs/smb/server/smb2misc.c @@ -101,13 +101,17 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, *len = le16_to_cpu(((struct smb2_sess_setup_req *)hdr)->SecurityBufferLength); break; case SMB2_TREE_CONNECT: - *off = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset); + *off = max_t(unsigned short int, + le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathOffset), + offsetof(struct smb2_tree_connect_req, Buffer)); *len = le16_to_cpu(((struct smb2_tree_connect_req *)hdr)->PathLength); break; case SMB2_CREATE: { unsigned short int name_off = - le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset); + max_t(unsigned short int, + le16_to_cpu(((struct smb2_create_req *)hdr)->NameOffset), + offsetof(struct smb2_create_req, Buffer)); unsigned short int name_len = le16_to_cpu(((struct smb2_create_req *)hdr)->NameLength); @@ -128,11 +132,15 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, break; } case SMB2_QUERY_INFO: - *off = le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset); + *off = max_t(unsigned int, + le16_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferOffset), + offsetof(struct smb2_query_info_req, Buffer)); *len = le32_to_cpu(((struct smb2_query_info_req *)hdr)->InputBufferLength); break; case SMB2_SET_INFO: - *off = le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset); + *off = max_t(unsigned int, + le16_to_cpu(((struct smb2_set_info_req *)hdr)->BufferOffset), + offsetof(struct smb2_set_info_req, Buffer)); *len = le32_to_cpu(((struct smb2_set_info_req *)hdr)->BufferLength); break; case SMB2_READ: @@ -142,7 +150,7 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, case SMB2_WRITE: if (((struct smb2_write_req *)hdr)->DataOffset || ((struct smb2_write_req *)hdr)->Length) { - *off = max_t(unsigned int, + *off = max_t(unsigned short int, le16_to_cpu(((struct smb2_write_req *)hdr)->DataOffset), offsetof(struct smb2_write_req, Buffer)); *len = le32_to_cpu(((struct smb2_write_req *)hdr)->Length); @@ -153,7 +161,9 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, *len = le16_to_cpu(((struct smb2_write_req *)hdr)->WriteChannelInfoLength); break; case SMB2_QUERY_DIRECTORY: - *off = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset); + *off = max_t(unsigned short int, + le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameOffset), + offsetof(struct smb2_query_directory_req, Buffer)); *len = le16_to_cpu(((struct smb2_query_directory_req *)hdr)->FileNameLength); break; case SMB2_LOCK: @@ -168,7 +178,9 @@ static int smb2_get_data_area_len(unsigned int *off, unsigned int *len, break; } case SMB2_IOCTL: - *off = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset); + *off = max_t(unsigned int, + le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputOffset), + offsetof(struct smb2_ioctl_req, Buffer)); *len = le32_to_cpu(((struct smb2_ioctl_req *)hdr)->InputCount); break; default: diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index 9b5847bf9b2a..9b1ba4aedbce 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -66,8 +66,10 @@ static inline bool check_session_id(struct ksmbd_conn *conn, u64 id) return false; sess = ksmbd_session_lookup_all(conn, id); - if (sess) + if (sess) { + ksmbd_user_session_put(sess); return true; + } pr_err("Invalid user session id: %llu\n", id); return false; } @@ -604,10 +606,8 @@ int 
smb2_check_user_session(struct ksmbd_work *work) /* Check for validity of user session */ work->sess = ksmbd_session_lookup_all(conn, sess_id); - if (work->sess) { - ksmbd_user_session_get(work->sess); + if (work->sess) return 1; - } ksmbd_debug(SMB, "Invalid user session, Uid %llu\n", sess_id); return -ENOENT; } @@ -712,6 +712,9 @@ void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status) struct smb2_hdr *rsp_hdr; struct ksmbd_work *in_work = ksmbd_alloc_work_struct(); + if (!in_work) + return; + if (allocate_interim_rsp_buf(in_work)) { pr_err("smb_allocate_rsp_buf failed!\n"); ksmbd_free_work_struct(in_work); @@ -1612,9 +1615,6 @@ static int krb5_authenticate(struct ksmbd_work *work, if (prev_sess_id && prev_sess_id != sess->id) destroy_previous_session(conn, sess->user, prev_sess_id); - if (sess->state == SMB2_SESSION_VALID) - ksmbd_free_user(sess->user); - retval = ksmbd_krb5_authenticate(sess, in_blob, in_len, out_blob, &out_len); if (retval) { @@ -1746,11 +1746,12 @@ int smb2_sess_setup(struct ksmbd_work *work) if (ksmbd_conn_need_reconnect(conn)) { rc = -EFAULT; + ksmbd_user_session_put(sess); sess = NULL; goto out_err; } - if (ksmbd_session_lookup(conn, sess_id)) { + if (is_ksmbd_session_in_connection(conn, sess_id)) { rc = -EACCES; goto out_err; } @@ -1761,7 +1762,6 @@ int smb2_sess_setup(struct ksmbd_work *work) } conn->binding = true; - ksmbd_user_session_get(sess); } else if ((conn->dialect < SMB30_PROT_ID || server_conf.flags & KSMBD_GLOBAL_FLAG_SMB3_MULTICHANNEL) && (req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) { @@ -1788,7 +1788,6 @@ int smb2_sess_setup(struct ksmbd_work *work) } conn->binding = false; - ksmbd_user_session_get(sess); } work->sess = sess; @@ -1918,6 +1917,8 @@ out_err: sess->last_active = jiffies; sess->state = SMB2_SESSION_EXPIRED; + ksmbd_user_session_put(sess); + work->sess = NULL; if (try_delay) { ksmbd_conn_set_need_reconnect(conn); ssleep(5); @@ -1961,7 +1962,7 @@ int smb2_tree_connect(struct ksmbd_work *work) WORK_BUFFERS(work, req, rsp); - treename = smb_strndup_from_utf16(req->Buffer, + treename = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->PathOffset), le16_to_cpu(req->PathLength), true, conn->local_nls); if (IS_ERR(treename)) { @@ -2207,9 +2208,9 @@ err_out: int smb2_session_logoff(struct ksmbd_work *work) { struct ksmbd_conn *conn = work->conn; + struct ksmbd_session *sess = work->sess; struct smb2_logoff_req *req; struct smb2_logoff_rsp *rsp; - struct ksmbd_session *sess; u64 sess_id; int err; @@ -2231,11 +2232,6 @@ int smb2_session_logoff(struct ksmbd_work *work) ksmbd_close_session_fds(work); ksmbd_conn_wait_idle(conn, sess_id); - /* - * Re-lookup session to validate if session is deleted - * while waiting request complete - */ - sess = ksmbd_session_lookup_all(conn, sess_id); if (ksmbd_tree_conn_session_logoff(sess)) { ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId); rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED; @@ -2243,13 +2239,14 @@ int smb2_session_logoff(struct ksmbd_work *work) return -ENOENT; } - ksmbd_destroy_file_table(&sess->file_table); down_write(&conn->session_lock); sess->state = SMB2_SESSION_EXPIRED; up_write(&conn->session_lock); - ksmbd_free_user(sess->user); - sess->user = NULL; + if (sess->user) { + ksmbd_free_user(sess->user); + sess->user = NULL; + } ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE); rsp->StructureSize = cpu_to_le16(4); @@ -2723,7 +2720,7 @@ int smb2_open(struct ksmbd_work *work) goto err_out2; } - name = smb2_get_name(req->Buffer, + name = 
smb2_get_name((char *)req + le16_to_cpu(req->NameOffset), le16_to_cpu(req->NameLength), work->conn->local_nls); if (IS_ERR(name)) { @@ -3241,7 +3238,7 @@ int smb2_open(struct ksmbd_work *work) goto err_out1; } } else { - if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE) { + if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE && lc) { /* * Compare parent lease using parent key. If there is no * a lease that has same parent key, Send lease break @@ -3988,6 +3985,7 @@ static bool __query_dir(struct dir_context *ctx, const char *name, int namlen, /* dot and dotdot entries are already reserved */ if (!strcmp(".", name) || !strcmp("..", name)) return true; + d_info->num_scan++; if (ksmbd_share_veto_filename(priv->work->tcon->share_conf, name)) return true; if (!match_pattern(name, namlen, priv->search_pattern)) @@ -4096,7 +4094,7 @@ int smb2_query_dir(struct ksmbd_work *work) } srch_flag = req->Flags; - srch_ptr = smb_strndup_from_utf16(req->Buffer, + srch_ptr = smb_strndup_from_utf16((char *)req + le16_to_cpu(req->FileNameOffset), le16_to_cpu(req->FileNameLength), 1, conn->local_nls); if (IS_ERR(srch_ptr)) { @@ -4148,8 +4146,17 @@ int smb2_query_dir(struct ksmbd_work *work) query_dir_private.info_level = req->FileInformationClass; dir_fp->readdir_data.private = &query_dir_private; set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir); - +again: + d_info.num_scan = 0; rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx); + /* + * num_entry can be 0 if the directory iteration stops before reaching + * the end of the directory and no file is matched with the search + * pattern. + */ + if (rc >= 0 && !d_info.num_entry && d_info.num_scan && + d_info.out_buf_len > 0) + goto again; /* * req->OutputBufferLength is too small to contain even one entry. * In this case, it immediately returns OutputBufferLength 0 to client. 
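Several ksmbd hunks above follow the same hardening pattern: instead of dereferencing req->Buffer at a fixed position, the payload pointer is computed from the client-supplied offset after smb2_get_data_area_len() has clamped that offset so it can never point before the PDU's Buffer field. The sketch below condenses the idea into one helper using illustrative types only (fake_req is not a ksmbd structure, and the separate length validation done elsewhere in smb2misc.c is folded into the same check here); wire-endianness conversion is omitted for brevity.

#include <stddef.h>
#include <stdint.h>

struct fake_req {                 /* stand-in for an SMB2 request PDU */
	uint8_t  header[64];      /* fixed part of the request */
	uint16_t payload_offset;  /* client-controlled */
	uint16_t payload_length;  /* client-controlled */
	uint8_t  Buffer[];        /* variable part begins here */
};

static const uint8_t *resolve_payload(const struct fake_req *req,
				      size_t req_len, size_t *out_len)
{
	size_t off = req->payload_offset;
	size_t len = req->payload_length;
	size_t min_off = offsetof(struct fake_req, Buffer);

	if (off < min_off)
		off = min_off;                 /* clamp, as the max_t() calls above do */
	if (off > req_len || len > req_len - off)
		return NULL;                   /* payload must fit inside the received PDU */

	*out_len = len;
	return (const uint8_t *)req + off;     /* pointer from the request base, not Buffer[] */
}

Returning NULL for an out-of-range payload corresponds to the validation failures that make the server reject the request rather than read past the end of the received buffer.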
@@ -4357,7 +4364,8 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp, sizeof(struct smb2_ea_info_req)) return -EINVAL; - ea_req = (struct smb2_ea_info_req *)req->Buffer; + ea_req = (struct smb2_ea_info_req *)((char *)req + + le16_to_cpu(req->InputBufferOffset)); } else { /* need to send all EAs, if no specific EA is requested*/ if (le32_to_cpu(req->Flags) & SL_RETURN_SINGLE_ENTRY) @@ -5971,6 +5979,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, struct ksmbd_share_config *share) { unsigned int buf_len = le32_to_cpu(req->BufferLength); + char *buffer = (char *)req + le16_to_cpu(req->BufferOffset); switch (req->FileInfoClass) { case FILE_BASIC_INFORMATION: @@ -5978,7 +5987,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, if (buf_len < sizeof(struct smb2_file_basic_info)) return -EINVAL; - return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share); + return set_file_basic_info(fp, (struct smb2_file_basic_info *)buffer, share); } case FILE_ALLOCATION_INFORMATION: { @@ -5986,7 +5995,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, return -EINVAL; return set_file_allocation_info(work, fp, - (struct smb2_file_alloc_info *)req->Buffer); + (struct smb2_file_alloc_info *)buffer); } case FILE_END_OF_FILE_INFORMATION: { @@ -5994,7 +6003,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, return -EINVAL; return set_end_of_file_info(work, fp, - (struct smb2_file_eof_info *)req->Buffer); + (struct smb2_file_eof_info *)buffer); } case FILE_RENAME_INFORMATION: { @@ -6002,7 +6011,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, return -EINVAL; return set_rename_info(work, fp, - (struct smb2_file_rename_info *)req->Buffer, + (struct smb2_file_rename_info *)buffer, buf_len); } case FILE_LINK_INFORMATION: @@ -6011,7 +6020,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, return -EINVAL; return smb2_create_link(work, work->tcon->share_conf, - (struct smb2_file_link_info *)req->Buffer, + (struct smb2_file_link_info *)buffer, buf_len, fp->filp, work->conn->local_nls); } @@ -6021,7 +6030,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, return -EINVAL; return set_file_disposition_info(fp, - (struct smb2_file_disposition_info *)req->Buffer); + (struct smb2_file_disposition_info *)buffer); } case FILE_FULL_EA_INFORMATION: { @@ -6034,7 +6043,7 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, if (buf_len < sizeof(struct smb2_ea_info)) return -EINVAL; - return smb2_set_ea((struct smb2_ea_info *)req->Buffer, + return smb2_set_ea((struct smb2_ea_info *)buffer, buf_len, &fp->filp->f_path, true); } case FILE_POSITION_INFORMATION: @@ -6042,14 +6051,14 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp, if (buf_len < sizeof(struct smb2_file_pos_info)) return -EINVAL; - return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer); + return set_file_position_info(fp, (struct smb2_file_pos_info *)buffer); } case FILE_MODE_INFORMATION: { if (buf_len < sizeof(struct smb2_file_mode_info)) return -EINVAL; - return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer); + return set_file_mode_info(fp, (struct smb2_file_mode_info *)buffer); } } @@ -6130,7 +6139,7 @@ int smb2_set_info(struct ksmbd_work *work) } rc = smb2_set_info_sec(fp, 
le32_to_cpu(req->AdditionalInformation), - req->Buffer, + (char *)req + le16_to_cpu(req->BufferOffset), le32_to_cpu(req->BufferLength)); ksmbd_revert_fsids(work); break; @@ -6368,6 +6377,10 @@ int smb2_read(struct ksmbd_work *work) } offset = le64_to_cpu(req->Offset); + if (offset < 0) { + err = -EINVAL; + goto out; + } length = le32_to_cpu(req->Length); mincount = le32_to_cpu(req->MinimumCount); @@ -6581,6 +6594,8 @@ int smb2_write(struct ksmbd_work *work) } offset = le64_to_cpu(req->Offset); + if (offset < 0) + return -EINVAL; length = le32_to_cpu(req->Length); if (req->Channel == SMB2_CHANNEL_RDMA_V1 || @@ -7103,17 +7118,17 @@ out_check_cl: } no_check_cl: + flock = smb_lock->fl; + list_del(&smb_lock->llist); + if (smb_lock->zero_len) { err = 0; goto skip; } - - flock = smb_lock->fl; - list_del(&smb_lock->llist); retry: rc = vfs_lock_file(filp, smb_lock->cmd, flock, NULL); skip: - if (flags & SMB2_LOCKFLAG_UNLOCK) { + if (smb_lock->flags & SMB2_LOCKFLAG_UNLOCK) { if (!rc) { ksmbd_debug(SMB, "File unlocked\n"); } else if (rc == -ENOENT) { @@ -7576,7 +7591,7 @@ static int fsctl_pipe_transceive(struct ksmbd_work *work, u64 id, struct smb2_ioctl_rsp *rsp) { struct ksmbd_rpc_command *rpc_resp; - char *data_buf = (char *)&req->Buffer[0]; + char *data_buf = (char *)req + le32_to_cpu(req->InputOffset); int nbytes = 0; rpc_resp = ksmbd_rpc_ioctl(work->sess, id, data_buf, @@ -7689,6 +7704,7 @@ int smb2_ioctl(struct ksmbd_work *work) u64 id = KSMBD_NO_FID; struct ksmbd_conn *conn = work->conn; int ret = 0; + char *buffer; if (work->next_smb2_rcv_hdr_off) { req = ksmbd_req_buf_next(work); @@ -7711,6 +7727,8 @@ int smb2_ioctl(struct ksmbd_work *work) goto out; } + buffer = (char *)req + le32_to_cpu(req->InputOffset); + cnt_code = le32_to_cpu(req->CtlCode); ret = smb2_calc_max_out_buf_len(work, 48, le32_to_cpu(req->MaxOutputResponse)); @@ -7768,7 +7786,7 @@ int smb2_ioctl(struct ksmbd_work *work) } ret = fsctl_validate_negotiate_info(conn, - (struct validate_negotiate_info_req *)&req->Buffer[0], + (struct validate_negotiate_info_req *)buffer, (struct validate_negotiate_info_rsp *)&rsp->Buffer[0], in_buf_len); if (ret < 0) @@ -7821,7 +7839,7 @@ int smb2_ioctl(struct ksmbd_work *work) rsp->VolatileFileId = req->VolatileFileId; rsp->PersistentFileId = req->PersistentFileId; fsctl_copychunk(work, - (struct copychunk_ioctl_req *)&req->Buffer[0], + (struct copychunk_ioctl_req *)buffer, le32_to_cpu(req->CtlCode), le32_to_cpu(req->InputCount), req->VolatileFileId, @@ -7834,8 +7852,7 @@ int smb2_ioctl(struct ksmbd_work *work) goto out; } - ret = fsctl_set_sparse(work, id, - (struct file_sparse *)&req->Buffer[0]); + ret = fsctl_set_sparse(work, id, (struct file_sparse *)buffer); if (ret < 0) goto out; break; @@ -7858,7 +7875,7 @@ int smb2_ioctl(struct ksmbd_work *work) } zero_data = - (struct file_zero_data_information *)&req->Buffer[0]; + (struct file_zero_data_information *)buffer; off = le64_to_cpu(zero_data->FileOffset); bfz = le64_to_cpu(zero_data->BeyondFinalZero); @@ -7889,7 +7906,7 @@ int smb2_ioctl(struct ksmbd_work *work) } ret = fsctl_query_allocated_ranges(work, id, - (struct file_allocated_range_buffer *)&req->Buffer[0], + (struct file_allocated_range_buffer *)buffer, (struct file_allocated_range_buffer *)&rsp->Buffer[0], out_buf_len / sizeof(struct file_allocated_range_buffer), &nbytes); @@ -7933,7 +7950,7 @@ int smb2_ioctl(struct ksmbd_work *work) goto out; } - dup_ext = (struct duplicate_extents_to_file *)&req->Buffer[0]; + dup_ext = (struct duplicate_extents_to_file *)buffer; fp_in = 
ksmbd_lookup_fd_slow(work, dup_ext->VolatileFileHandle, dup_ext->PersistentFileHandle); @@ -8672,6 +8689,7 @@ int smb3_decrypt_req(struct ksmbd_work *work) le64_to_cpu(tr_hdr->SessionId)); return -ECONNABORTED; } + ksmbd_user_session_put(sess); iov[0].iov_base = buf; iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4; diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c index bdcdc0fc9cad..7134abeeb53e 100644 --- a/fs/smb/server/smb_common.c +++ b/fs/smb/server/smb_common.c @@ -18,8 +18,8 @@ #include "mgmt/share_config.h" /*for shortname implementation */ -static const char basechars[43] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%"; -#define MANGLE_BASE (sizeof(basechars) / sizeof(char) - 1) +static const char *basechars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%"; +#define MANGLE_BASE (strlen(basechars) - 1) #define MAGIC_CHAR '~' #define PERIOD '.' #define mangle(V) ((char)(basechars[(V) % MANGLE_BASE])) diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c index 6fd3560028d3..90c5e3edbf46 100644 --- a/fs/smb/server/smbacl.c +++ b/fs/smb/server/smbacl.c @@ -270,6 +270,11 @@ static int sid_to_id(struct user_namespace *user_ns, return -EIO; } + if (psid->num_subauth == 0) { + pr_err("%s: zero subauthorities!\n", __func__); + return -EIO; + } + if (sidtype == SIDOWNER) { kuid_t uid; uid_t id; @@ -398,7 +403,9 @@ static void parse_dacl(struct user_namespace *user_ns, if (num_aces <= 0) return; - if (num_aces > ULONG_MAX / sizeof(struct smb_ace *)) + if (num_aces > (le16_to_cpu(pdacl->size) - sizeof(struct smb_acl)) / + (offsetof(struct smb_ace, sid) + + offsetof(struct smb_sid, sub_auth) + sizeof(__le16))) return; ret = init_acl_state(&acl_state, num_aces); @@ -432,6 +439,7 @@ static void parse_dacl(struct user_namespace *user_ns, offsetof(struct smb_sid, sub_auth); if (end_of_acl - acl_base < acl_size || + ppace[i]->sid.num_subauth == 0 || ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES || (end_of_acl - acl_base < acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) || diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c index 8752ac82c557..7fc4b33b89e3 100644 --- a/fs/smb/server/transport_ipc.c +++ b/fs/smb/server/transport_ipc.c @@ -267,6 +267,7 @@ static int handle_response(int type, void *payload, size_t sz) if (entry->type + 1 != type) { pr_err("Waiting for IPC type %d, got %d. 
Ignore.\n", entry->type + 1, type); + continue; } entry->response = kvzalloc(sz, GFP_KERNEL); @@ -295,7 +296,11 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req) server_conf.signing = req->signing; server_conf.tcp_port = req->tcp_port; server_conf.ipc_timeout = req->ipc_timeout * HZ; - server_conf.deadtime = req->deadtime * SMB_ECHO_INTERVAL; + if (check_mul_overflow(req->deadtime, SMB_ECHO_INTERVAL, + &server_conf.deadtime)) { + ret = -EINVAL; + goto out; + } server_conf.share_fake_fscaps = req->share_fake_fscaps; ksmbd_init_domain(req->sub_auth); @@ -318,6 +323,7 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req) ret |= ksmbd_set_work_group(req->work_group); ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req), req->ifc_list_sz); +out: if (ret) { pr_err("Server configuration error: %s %s %s\n", req->netbios_name, req->server_string, @@ -567,6 +573,9 @@ ksmbd_ipc_spnego_authen_request(const char *spnego_blob, int blob_len) struct ksmbd_spnego_authen_request *req; struct ksmbd_spnego_authen_response *resp; + if (blob_len > KSMBD_IPC_MAX_PAYLOAD) + return NULL; + msg = ipc_msg_alloc(sizeof(struct ksmbd_spnego_authen_request) + blob_len + 1); if (!msg) @@ -746,6 +755,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_write(struct ksmbd_session *sess, int handle struct ksmbd_rpc_command *req; struct ksmbd_rpc_command *resp; + if (payload_sz > KSMBD_IPC_MAX_PAYLOAD) + return NULL; + msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1); if (!msg) return NULL; @@ -794,6 +806,9 @@ struct ksmbd_rpc_command *ksmbd_rpc_ioctl(struct ksmbd_session *sess, int handle struct ksmbd_rpc_command *req; struct ksmbd_rpc_command *resp; + if (payload_sz > KSMBD_IPC_MAX_PAYLOAD) + return NULL; + msg = ipc_msg_alloc(sizeof(struct ksmbd_rpc_command) + payload_sz + 1); if (!msg) return NULL; diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c index eaae67a2f720..fa647b75fba8 100644 --- a/fs/smb/server/vfs.c +++ b/fs/smb/server/vfs.c @@ -423,10 +423,15 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos, ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n", *pos, count); + if (*pos >= XATTR_SIZE_MAX) { + pr_err("stream write position %lld is out of bounds\n", *pos); + return -EINVAL; + } + size = *pos + count; if (size > XATTR_SIZE_MAX) { size = XATTR_SIZE_MAX; - count = (*pos + count) - XATTR_SIZE_MAX; + count = XATTR_SIZE_MAX - *pos; } v_len = ksmbd_vfs_getcasexattr(user_ns, @@ -493,7 +498,8 @@ int ksmbd_vfs_write(struct ksmbd_work *work, struct ksmbd_file *fp, int err = 0; if (work->conn->connection_type) { - if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) { + if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE)) || + S_ISDIR(file_inode(fp->filp)->i_mode)) { pr_err("no right to write(%pD)\n", fp->filp); err = -EACCES; goto out; @@ -1249,6 +1255,8 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, filepath, flags, path); + if (!is_last) + next[0] = '/'; if (err) goto out2; else if (is_last) @@ -1256,7 +1264,6 @@ int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name, path_put(parent_path); *parent_path = *path; - next[0] = '/'; remain_len -= filename_len + 1; } diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h index e761dde2443e..cc47e71c4de1 100644 --- a/fs/smb/server/vfs.h +++ b/fs/smb/server/vfs.h @@ -43,6 +43,7 @@ struct ksmbd_dir_info { char *rptr; int name_len; int out_buf_len; + int num_scan; int num_entry; int data_count; 
int last_entry_offset; diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c index f31649080a88..95a9ff9e2399 100644 --- a/fs/squashfs/inode.c +++ b/fs/squashfs/inode.c @@ -48,6 +48,10 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode, gid_t i_gid; int err; + inode->i_ino = le32_to_cpu(sqsh_ino->inode_number); + if (inode->i_ino == 0) + return -EINVAL; + err = squashfs_get_id(sb, le16_to_cpu(sqsh_ino->uid), &i_uid); if (err) return err; @@ -58,7 +62,6 @@ static int squashfs_new_inode(struct super_block *sb, struct inode *inode, i_uid_write(inode, i_uid); i_gid_write(inode, i_gid); - inode->i_ino = le32_to_cpu(sqsh_ino->inode_number); inode->i_mtime.tv_sec = le32_to_cpu(sqsh_ino->mtime); inode->i_atime.tv_sec = inode->i_mtime.tv_sec; inode->i_ctime.tv_sec = inode->i_mtime.tv_sec; diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 3f128b9fdfbb..9613725ed193 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c @@ -946,16 +946,20 @@ void ubifs_dump_tnc(struct ubifs_info *c) pr_err("\n"); pr_err("(pid %d) start dumping TNC tree\n", current->pid); - znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL); - level = znode->level; - pr_err("== Level %d ==\n", level); - while (znode) { - if (level != znode->level) { - level = znode->level; - pr_err("== Level %d ==\n", level); + if (c->zroot.znode) { + znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, NULL); + level = znode->level; + pr_err("== Level %d ==\n", level); + while (znode) { + if (level != znode->level) { + level = znode->level; + pr_err("== Level %d ==\n", level); + } + ubifs_dump_znode(c, znode); + znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode); } - ubifs_dump_znode(c, znode); - znode = ubifs_tnc_levelorder_next(c, c->zroot.znode, znode); + } else { + pr_err("empty TNC tree in memory\n"); } pr_err("(pid %d) finish dumping TNC tree\n", current->pid); } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 32cb14759796..d4c3948a7f86 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -777,10 +777,10 @@ static void init_constants_master(struct ubifs_info *c) * necessary to report something for the 'statfs()' call. * * Subtract the LEB reserved for GC, the LEB which is reserved for - * deletions, minimum LEBs for the index, and assume only one journal - * head is available. + * deletions, minimum LEBs for the index, the LEBs which are reserved + * for each journal head. 
*/ - tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt + 1; + tmp64 = c->main_lebs - 1 - 1 - MIN_INDEX_LEBS - c->jhead_cnt; tmp64 *= (long long)c->leb_size - c->leb_overhead; tmp64 = ubifs_reported_space(c, tmp64); c->block_cnt = tmp64 >> UBIFS_BLOCK_SHIFT; diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c index 01362ad5f804..efd52cc53436 100644 --- a/fs/ubifs/tnc_commit.c +++ b/fs/ubifs/tnc_commit.c @@ -657,6 +657,8 @@ static int get_znodes_to_commit(struct ubifs_info *c) znode->alt = 0; cnext = find_next_dirty(znode); if (!cnext) { + ubifs_assert(c, !znode->parent); + znode->cparent = NULL; znode->cnext = c->cnext; break; } diff --git a/fs/udf/directory.c b/fs/udf/directory.c index 04169e428fdf..76dd2e35e098 100644 --- a/fs/udf/directory.c +++ b/fs/udf/directory.c @@ -170,7 +170,7 @@ static struct buffer_head *udf_fiiter_bread_blk(struct udf_fileident_iter *iter) static int udf_fiiter_advance_blk(struct udf_fileident_iter *iter) { iter->loffset++; - if (iter->loffset < iter->elen >> iter->dir->i_blkbits) + if (iter->loffset < DIV_ROUND_UP(iter->elen, 1<<iter->dir->i_blkbits)) return 0; iter->loffset = 0; diff --git a/fs/udf/inode.c b/fs/udf/inode.c index d7d6ccd0af06..e2ac428f3809 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -369,29 +369,6 @@ abort: return err; } -static struct buffer_head *udf_getblk(struct inode *inode, udf_pblk_t block, - int create, int *err) -{ - struct buffer_head *bh; - struct buffer_head dummy; - - dummy.b_state = 0; - dummy.b_blocknr = -1000; - *err = udf_get_block(inode, block, &dummy, create); - if (!*err && buffer_mapped(&dummy)) { - bh = sb_getblk(inode->i_sb, dummy.b_blocknr); - if (buffer_new(&dummy)) { - lock_buffer(bh); - memset(bh->b_data, 0x00, inode->i_sb->s_blocksize); - set_buffer_uptodate(bh); - unlock_buffer(bh); - mark_buffer_dirty_inode(bh, inode); - } - return bh; - } - - return NULL; -} /* Extend the file with new blocks totaling 'new_block_bytes', * return the number of extents added @@ -1108,11 +1085,30 @@ struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block, int create, int *err) { struct buffer_head *bh = NULL; + struct buffer_head dummy; - bh = udf_getblk(inode, block, create, err); - if (!bh) + dummy.b_state = 0; + dummy.b_blocknr = -1000; + + *err = udf_get_block(inode, block, &dummy, create); + if (*err || !buffer_mapped(&dummy)) return NULL; + bh = sb_getblk(inode->i_sb, dummy.b_blocknr); + if (!bh) { + *err = -ENOMEM; + return NULL; + } + + if (buffer_new(&dummy)) { + lock_buffer(bh); + memset(bh->b_data, 0x00, inode->i_sb->s_blocksize); + set_buffer_uptodate(bh); + unlock_buffer(bh); + mark_buffer_dirty_inode(bh, inode); + return bh; + } + if (bh_read(bh, 0) >= 0) return bh; diff --git a/fs/unicode/mkutf8data.c b/fs/unicode/mkutf8data.c index e779252be164..bc1a7c8b5c8d 100644 --- a/fs/unicode/mkutf8data.c +++ b/fs/unicode/mkutf8data.c @@ -2230,6 +2230,75 @@ static void nfdicf_init(void) file_fail(fold_name); } +static void ignore_init(void) +{ + FILE *file; + unsigned int unichar; + unsigned int first; + unsigned int last; + unsigned int *um; + int count; + int ret; + + if (verbose > 0) + printf("Parsing %s\n", prop_name); + file = fopen(prop_name, "r"); + if (!file) + open_fail(prop_name, errno); + assert(file); + count = 0; + while (fgets(line, LINESIZE, file)) { + ret = sscanf(line, "%X..%X ; %s # ", &first, &last, buf0); + if (ret == 3) { + if (strcmp(buf0, "Default_Ignorable_Code_Point")) + continue; + if (!utf32valid(first) || !utf32valid(last)) + line_fail(prop_name, line); + for (unichar = 
first; unichar <= last; unichar++) { + free(unicode_data[unichar].utf32nfdi); + um = malloc(sizeof(unsigned int)); + *um = 0; + unicode_data[unichar].utf32nfdi = um; + free(unicode_data[unichar].utf32nfdicf); + um = malloc(sizeof(unsigned int)); + *um = 0; + unicode_data[unichar].utf32nfdicf = um; + count++; + } + if (verbose > 1) + printf(" %X..%X Default_Ignorable_Code_Point\n", + first, last); + continue; + } + ret = sscanf(line, "%X ; %s # ", &unichar, buf0); + if (ret == 2) { + if (strcmp(buf0, "Default_Ignorable_Code_Point")) + continue; + if (!utf32valid(unichar)) + line_fail(prop_name, line); + free(unicode_data[unichar].utf32nfdi); + um = malloc(sizeof(unsigned int)); + *um = 0; + unicode_data[unichar].utf32nfdi = um; + free(unicode_data[unichar].utf32nfdicf); + um = malloc(sizeof(unsigned int)); + *um = 0; + unicode_data[unichar].utf32nfdicf = um; + if (verbose > 1) + printf(" %X Default_Ignorable_Code_Point\n", + unichar); + count++; + continue; + } + } + fclose(file); + + if (verbose > 0) + printf("Found %d entries\n", count); + if (count == 0) + file_fail(prop_name); +} + static void corrections_init(void) { FILE *file; @@ -3341,6 +3410,7 @@ int main(int argc, char *argv[]) ccc_init(); nfdi_init(); nfdicf_init(); + ignore_init(); corrections_init(); hangul_decompose(); nfdi_decompose(); diff --git a/fs/unicode/utf8-core.c b/fs/unicode/utf8-core.c index 67aaadc3ab07..c887edc4f527 100644 --- a/fs/unicode/utf8-core.c +++ b/fs/unicode/utf8-core.c @@ -198,7 +198,7 @@ struct unicode_map *utf8_load(unsigned int version) return um; out_symbol_put: - symbol_put(um->tables); + symbol_put(utf8_data_table); out_free_um: kfree(um); return ERR_PTR(-EINVAL); diff --git a/fs/unicode/utf8data.c_shipped b/fs/unicode/utf8data.c_shipped index 1aab5257a331..d9b62901aa96 100644 --- a/fs/unicode/utf8data.c_shipped +++ b/fs/unicode/utf8data.c_shipped @@ -82,58 +82,58 @@ static const struct utf8data utf8nfdidata[] = { { 0xc0100, 20736 } }; -static const unsigned char utf8data[64080] = { +static const unsigned char utf8data[64256] = { /* nfdicf_30100 */ - 0xd7,0x07,0x66,0x84,0x0c,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x96,0x1a,0xe3,0x60,0x15, - 0xe2,0x49,0x0e,0xc1,0xe0,0x4b,0x0d,0xcf,0x86,0x65,0x2d,0x0d,0x01,0x00,0xd4,0xb8, - 0xd3,0x27,0xe2,0x03,0xa3,0xe1,0xcb,0x35,0xe0,0x29,0x22,0xcf,0x86,0xc5,0xe4,0xfa, - 0x6c,0xe3,0x45,0x68,0xe2,0xdb,0x65,0xe1,0x0e,0x65,0xe0,0xd3,0x64,0xcf,0x86,0xe5, - 0x98,0x64,0x64,0x7b,0x64,0x0b,0x00,0xd2,0x0e,0xe1,0xb3,0x3c,0xe0,0x34,0xa3,0xcf, - 0x86,0xcf,0x06,0x01,0x00,0xd1,0x0c,0xe0,0x98,0xa8,0xcf,0x86,0xcf,0x06,0x02,0xff, + 0xd7,0x07,0x66,0x84,0x0c,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x99,0x1a,0xe3,0x63,0x15, + 0xe2,0x4c,0x0e,0xc1,0xe0,0x4e,0x0d,0xcf,0x86,0x65,0x2d,0x0d,0x01,0x00,0xd4,0xb8, + 0xd3,0x27,0xe2,0x89,0xa3,0xe1,0xce,0x35,0xe0,0x2c,0x22,0xcf,0x86,0xc5,0xe4,0x15, + 0x6d,0xe3,0x60,0x68,0xe2,0xf6,0x65,0xe1,0x29,0x65,0xe0,0xee,0x64,0xcf,0x86,0xe5, + 0xb3,0x64,0x64,0x96,0x64,0x0b,0x00,0xd2,0x0e,0xe1,0xb5,0x3c,0xe0,0xba,0xa3,0xcf, + 0x86,0xcf,0x06,0x01,0x00,0xd1,0x0c,0xe0,0x1e,0xa9,0xcf,0x86,0xcf,0x06,0x02,0xff, 0xff,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01, - 0x00,0xe4,0xdf,0x45,0xe3,0x39,0x45,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x01,0xad, - 0xd0,0x21,0xcf,0x86,0xe5,0xfb,0xa9,0xe4,0x7a,0xa9,0xe3,0x39,0xa9,0xe2,0x18,0xa9, - 0xe1,0x07,0xa9,0x10,0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4, - 0x00,0xcf,0x86,0xe5,0xdd,0xab,0xd4,0x19,0xe3,0x1c,0xab,0xe2,0xfb,0xaa,0xe1,0xea, - 
0xaa,0x10,0x08,0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3, - 0x83,0xab,0xe2,0x62,0xab,0xe1,0x51,0xab,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00, - 0x01,0xff,0xe9,0x9b,0xbb,0x00,0x83,0xe2,0x68,0xf9,0xe1,0x52,0xf6,0xe0,0xcf,0xf4, - 0xcf,0x86,0xd5,0x31,0xc4,0xe3,0x51,0x4e,0xe2,0xf2,0x4c,0xe1,0x09,0xcc,0xe0,0x99, - 0x4b,0xcf,0x86,0xe5,0x8b,0x49,0xe4,0xac,0x46,0xe3,0x76,0xbc,0xe2,0xcd,0xbb,0xe1, - 0xa8,0xbb,0xe0,0x81,0xbb,0xcf,0x86,0xe5,0x4e,0xbb,0x94,0x07,0x63,0x39,0xbb,0x07, - 0x00,0x07,0x00,0xe4,0x3b,0xf4,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b, - 0xe1,0x4a,0xe1,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x39,0xe2,0xcf,0x86, - 0xe5,0xfe,0xe1,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x39,0xe2,0xcf,0x06, - 0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0xd4,0xf3,0xe3,0xbd,0xf2, - 0xd2,0xa0,0xe1,0x73,0xe6,0xd0,0x21,0xcf,0x86,0xe5,0x74,0xe3,0xe4,0xf0,0xe2,0xe3, - 0xae,0xe2,0xe2,0x8d,0xe2,0xe1,0x7b,0xe2,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00, - 0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xd0,0xe4,0xe3,0x8f,0xe4, - 0xe2,0x6e,0xe4,0xe1,0x5d,0xe4,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff, - 0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0x57,0xe5,0xe1,0x46,0xe5,0x10,0x09, - 0x05,0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x77, - 0xe5,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac, - 0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0xbd,0xe5,0xd2,0x14,0xe1,0x8c,0xe5, + 0x00,0xe4,0xe1,0x45,0xe3,0x3b,0x45,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x87,0xad, + 0xd0,0x21,0xcf,0x86,0xe5,0x81,0xaa,0xe4,0x00,0xaa,0xe3,0xbf,0xa9,0xe2,0x9e,0xa9, + 0xe1,0x8d,0xa9,0x10,0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4, + 0x00,0xcf,0x86,0xe5,0x63,0xac,0xd4,0x19,0xe3,0xa2,0xab,0xe2,0x81,0xab,0xe1,0x70, + 0xab,0x10,0x08,0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3, + 0x09,0xac,0xe2,0xe8,0xab,0xe1,0xd7,0xab,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00, + 0x01,0xff,0xe9,0x9b,0xbb,0x00,0x83,0xe2,0x19,0xfa,0xe1,0xf2,0xf6,0xe0,0x6f,0xf5, + 0xcf,0x86,0xd5,0x31,0xc4,0xe3,0x54,0x4e,0xe2,0xf5,0x4c,0xe1,0xa4,0xcc,0xe0,0x9c, + 0x4b,0xcf,0x86,0xe5,0x8e,0x49,0xe4,0xaf,0x46,0xe3,0x11,0xbd,0xe2,0x68,0xbc,0xe1, + 0x43,0xbc,0xe0,0x1c,0xbc,0xcf,0x86,0xe5,0xe9,0xbb,0x94,0x07,0x63,0xd4,0xbb,0x07, + 0x00,0x07,0x00,0xe4,0xdb,0xf4,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b, + 0xe1,0xea,0xe1,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0xd9,0xe2,0xcf,0x86, + 0xe5,0x9e,0xe2,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xd9,0xe2,0xcf,0x06, + 0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x74,0xf4,0xe3,0x5d,0xf3, + 0xd2,0xa0,0xe1,0x13,0xe7,0xd0,0x21,0xcf,0x86,0xe5,0x14,0xe4,0xe4,0x90,0xe3,0xe3, + 0x4e,0xe3,0xe2,0x2d,0xe3,0xe1,0x1b,0xe3,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00, + 0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0x70,0xe5,0xe3,0x2f,0xe5, + 0xe2,0x0e,0xe5,0xe1,0xfd,0xe4,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff, + 0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0xf7,0xe5,0xe1,0xe6,0xe5,0x10,0x09, + 0x05,0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x17, + 0xe6,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac, + 0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0x5d,0xe6,0xd2,0x14,0xe1,0x2c,0xe6, 0x10,0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1, - 0x98,0xe5,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00, - 
0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0xed,0xea,0xd4,0x19,0xe3,0x26,0xea,0xe2,0x04, - 0xea,0xe1,0xf3,0xe9,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5, - 0xb7,0x00,0xd3,0x18,0xe2,0x70,0xea,0xe1,0x5f,0xea,0x10,0x09,0x05,0xff,0xf0,0xa3, - 0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x88,0xea,0x10, + 0x38,0xe6,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00, + 0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x8d,0xeb,0xd4,0x19,0xe3,0xc6,0xea,0xe2,0xa4, + 0xea,0xe1,0x93,0xea,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5, + 0xb7,0x00,0xd3,0x18,0xe2,0x10,0xeb,0xe1,0xff,0xea,0x10,0x09,0x05,0xff,0xf0,0xa3, + 0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x28,0xeb,0x10, 0x08,0x05,0xff,0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10, 0x08,0x05,0xff,0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08, - 0x05,0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x8a, - 0xec,0xd4,0x1a,0xe3,0xc2,0xeb,0xe2,0xa8,0xeb,0xe1,0x95,0xeb,0x10,0x08,0x05,0xff, - 0xe7,0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x0a,0xec, - 0xe1,0xf8,0xeb,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3, - 0x00,0xd2,0x13,0xe1,0x26,0xec,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff, + 0x05,0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x2a, + 0xed,0xd4,0x1a,0xe3,0x62,0xec,0xe2,0x48,0xec,0xe1,0x35,0xec,0x10,0x08,0x05,0xff, + 0xe7,0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0xaa,0xec, + 0xe1,0x98,0xec,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3, + 0x00,0xd2,0x13,0xe1,0xc6,0xec,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff, 0xe7,0xa9,0x80,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05, 0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05, - 0xff,0xe7,0xaa,0xae,0x00,0xe0,0x3c,0xef,0xcf,0x86,0xd5,0x1d,0xe4,0xb1,0xed,0xe3, - 0x6d,0xed,0xe2,0x4b,0xed,0xe1,0x3a,0xed,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f, - 0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0x58,0xee,0xe2,0x34,0xee,0xe1, - 0x23,0xee,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00, - 0xd3,0x18,0xe2,0xa3,0xee,0xe1,0x92,0xee,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1, - 0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0xbb,0xee,0x10,0x08,0x05, + 0xff,0xe7,0xaa,0xae,0x00,0xe0,0xdc,0xef,0xcf,0x86,0xd5,0x1d,0xe4,0x51,0xee,0xe3, + 0x0d,0xee,0xe2,0xeb,0xed,0xe1,0xda,0xed,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f, + 0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0xf8,0xee,0xe2,0xd4,0xee,0xe1, + 0xc3,0xee,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00, + 0xd3,0x18,0xe2,0x43,0xef,0xe1,0x32,0xef,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1, + 0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x5b,0xef,0x10,0x08,0x05, 0xff,0xe8,0x9a,0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05, 0xff,0xe8,0x9c,0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8, 0x9e,0x86,0x00,0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, @@ -141,152 +141,152 @@ static const unsigned char utf8data[64080] = { 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* nfdi_30100 */ - 0x57,0x04,0x01,0x00,0xc6,0xd5,0x13,0xe4,0xa8,0x59,0xe3,0xe2,0x54,0xe2,0x5b,0x4f, - 0xc1,0xe0,0x87,0x4d,0xcf,0x06,0x01,0x00,0xd4,0xb8,0xd3,0x27,0xe2,0x89,0x9f,0xe1, 
- 0x91,0x8d,0xe0,0x21,0x71,0xcf,0x86,0xc5,0xe4,0x80,0x69,0xe3,0xcb,0x64,0xe2,0x61, - 0x62,0xe1,0x94,0x61,0xe0,0x59,0x61,0xcf,0x86,0xe5,0x1e,0x61,0x64,0x01,0x61,0x0b, - 0x00,0xd2,0x0e,0xe1,0x3f,0xa0,0xe0,0xba,0x9f,0xcf,0x86,0xcf,0x06,0x01,0x00,0xd1, - 0x0c,0xe0,0x1e,0xa5,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xd0,0x08,0xcf,0x86,0xcf, - 0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xe4,0x1b,0xb6,0xe3,0x95, - 0xad,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x87,0xa9,0xd0,0x21,0xcf,0x86,0xe5,0x81, - 0xa6,0xe4,0x00,0xa6,0xe3,0xbf,0xa5,0xe2,0x9e,0xa5,0xe1,0x8d,0xa5,0x10,0x08,0x01, - 0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0xcf,0x86,0xe5,0x63,0xa8, - 0xd4,0x19,0xe3,0xa2,0xa7,0xe2,0x81,0xa7,0xe1,0x70,0xa7,0x10,0x08,0x01,0xff,0xe9, - 0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3,0x09,0xa8,0xe2,0xe8,0xa7,0xe1, - 0xd7,0xa7,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,0x01,0xff,0xe9,0x9b,0xbb,0x00, - 0x83,0xe2,0xee,0xf5,0xe1,0xd8,0xf2,0xe0,0x55,0xf1,0xcf,0x86,0xd5,0x31,0xc4,0xe3, - 0xd5,0xcb,0xe2,0xae,0xc9,0xe1,0x8f,0xc8,0xe0,0x1f,0xbf,0xcf,0x86,0xe5,0x12,0xbb, - 0xe4,0x0b,0xba,0xe3,0xfc,0xb8,0xe2,0x53,0xb8,0xe1,0x2e,0xb8,0xe0,0x07,0xb8,0xcf, - 0x86,0xe5,0xd4,0xb7,0x94,0x07,0x63,0xbf,0xb7,0x07,0x00,0x07,0x00,0xe4,0xc1,0xf0, - 0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1,0xd0,0xdd,0xcf,0x86,0xcf, - 0x06,0x05,0x00,0xd1,0x0e,0xe0,0xbf,0xde,0xcf,0x86,0xe5,0x84,0xde,0xcf,0x06,0x11, - 0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xbf,0xde,0xcf,0x06,0x13,0x00,0xcf,0x86,0xd5,0x06, - 0xcf,0x06,0x00,0x00,0xe4,0x5a,0xf0,0xe3,0x43,0xef,0xd2,0xa0,0xe1,0xf9,0xe2,0xd0, - 0x21,0xcf,0x86,0xe5,0xfa,0xdf,0xe4,0x76,0xdf,0xe3,0x34,0xdf,0xe2,0x13,0xdf,0xe1, - 0x01,0xdf,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05,0xff,0xe4,0xb8,0xb8,0x00, - 0xcf,0x86,0xd5,0x1c,0xe4,0x56,0xe1,0xe3,0x15,0xe1,0xe2,0xf4,0xe0,0xe1,0xe3,0xe0, - 0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5,0x93,0xb6,0x00,0xd4,0x34, - 0xd3,0x18,0xe2,0xdd,0xe1,0xe1,0xcc,0xe1,0x10,0x09,0x05,0xff,0xf0,0xa1,0x9a,0xa8, - 0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0xfd,0xe1,0x91,0x11,0x10,0x09,0x05, - 0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88,0x00,0x05,0xff,0xe5,0xac, - 0xbe,0x00,0xe3,0x43,0xe2,0xd2,0x14,0xe1,0x12,0xe2,0x10,0x08,0x05,0xff,0xe5,0xaf, - 0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0x1e,0xe2,0x10,0x08,0x05,0xff, - 0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1,0xd5,0xd0,0x6a,0xcf,0x86, - 0xe5,0x73,0xe7,0xd4,0x19,0xe3,0xac,0xe6,0xe2,0x8a,0xe6,0xe1,0x79,0xe6,0x10,0x08, - 0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7,0x00,0xd3,0x18,0xe2,0xf6, - 0xe6,0xe1,0xe5,0xe6,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd,0x9e,0x00,0x05,0xff,0xf0, - 0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x0e,0xe7,0x10,0x08,0x05,0xff,0xe7,0x81,0xbd, - 0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08,0x05,0xff,0xe7,0x85,0x85, - 0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05,0xff,0xe7,0x86,0x9c,0x00, - 0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x10,0xe9,0xd4,0x1a,0xe3,0x48,0xe8, - 0xe2,0x2e,0xe8,0xe1,0x1b,0xe8,0x10,0x08,0x05,0xff,0xe7,0x9b,0xb4,0x00,0x05,0xff, - 0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x90,0xe8,0xe1,0x7e,0xe8,0x10,0x08,0x05, - 0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00,0xd2,0x13,0xe1,0xac,0xe8, - 0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7,0xa9,0x80,0x00,0xd1,0x12, - 0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00, - 0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff,0xe7,0xaa,0xae,0x00,0xe0, - 
0xc2,0xeb,0xcf,0x86,0xd5,0x1d,0xe4,0x37,0xea,0xe3,0xf3,0xe9,0xe2,0xd1,0xe9,0xe1, - 0xc0,0xe9,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00,0x05,0xff,0xe4,0x8f,0x95, - 0x00,0xd4,0x19,0xe3,0xde,0xea,0xe2,0xba,0xea,0xe1,0xa9,0xea,0x10,0x08,0x05,0xff, - 0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3,0x18,0xe2,0x29,0xeb,0xe1, - 0x18,0xeb,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00,0x05,0xff,0xf0,0xa7,0x83, - 0x92,0x00,0xd2,0x13,0xe1,0x41,0xeb,0x10,0x08,0x05,0xff,0xe8,0x9a,0x88,0x00,0x05, - 0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff,0xe8,0x9c,0xa8,0x00,0x05, - 0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e,0x86,0x00,0x05,0xff,0xe4, - 0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x57,0x04,0x01,0x00,0xc6,0xd5,0x16,0xe4,0xc2,0x59,0xe3,0xfb,0x54,0xe2,0x74,0x4f, + 0xc1,0xe0,0xa0,0x4d,0xcf,0x86,0x65,0x84,0x4d,0x01,0x00,0xd4,0xb8,0xd3,0x27,0xe2, + 0x0c,0xa0,0xe1,0xdf,0x8d,0xe0,0x39,0x71,0xcf,0x86,0xc5,0xe4,0x98,0x69,0xe3,0xe3, + 0x64,0xe2,0x79,0x62,0xe1,0xac,0x61,0xe0,0x71,0x61,0xcf,0x86,0xe5,0x36,0x61,0x64, + 0x19,0x61,0x0b,0x00,0xd2,0x0e,0xe1,0xc2,0xa0,0xe0,0x3d,0xa0,0xcf,0x86,0xcf,0x06, + 0x01,0x00,0xd1,0x0c,0xe0,0xa1,0xa5,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xd0,0x08, + 0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xe4,0x9e, + 0xb6,0xe3,0x18,0xae,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x0a,0xaa,0xd0,0x21,0xcf, + 0x86,0xe5,0x04,0xa7,0xe4,0x83,0xa6,0xe3,0x42,0xa6,0xe2,0x21,0xa6,0xe1,0x10,0xa6, + 0x10,0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0xcf,0x86, + 0xe5,0xe6,0xa8,0xd4,0x19,0xe3,0x25,0xa8,0xe2,0x04,0xa8,0xe1,0xf3,0xa7,0x10,0x08, + 0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0xe3,0x8c,0xa8,0xe2, + 0x6b,0xa8,0xe1,0x5a,0xa8,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00,0x01,0xff,0xe9, + 0x9b,0xbb,0x00,0x83,0xe2,0x9c,0xf6,0xe1,0x75,0xf3,0xe0,0xf2,0xf1,0xcf,0x86,0xd5, + 0x31,0xc4,0xe3,0x6d,0xcc,0xe2,0x46,0xca,0xe1,0x27,0xc9,0xe0,0xb7,0xbf,0xcf,0x86, + 0xe5,0xaa,0xbb,0xe4,0xa3,0xba,0xe3,0x94,0xb9,0xe2,0xeb,0xb8,0xe1,0xc6,0xb8,0xe0, + 0x9f,0xb8,0xcf,0x86,0xe5,0x6c,0xb8,0x94,0x07,0x63,0x57,0xb8,0x07,0x00,0x07,0x00, + 0xe4,0x5e,0xf1,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1,0x6d,0xde, + 0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x5c,0xdf,0xcf,0x86,0xe5,0x21,0xdf, + 0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x5c,0xdf,0xcf,0x06,0x13,0x00,0xcf, + 0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0xf7,0xf0,0xe3,0xe0,0xef,0xd2,0xa0,0xe1, + 0x96,0xe3,0xd0,0x21,0xcf,0x86,0xe5,0x97,0xe0,0xe4,0x13,0xe0,0xe3,0xd1,0xdf,0xe2, + 0xb0,0xdf,0xe1,0x9e,0xdf,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05,0xff,0xe4, + 0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xf3,0xe1,0xe3,0xb2,0xe1,0xe2,0x91,0xe1, + 0xe1,0x80,0xe1,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5,0x93,0xb6, + 0x00,0xd4,0x34,0xd3,0x18,0xe2,0x7a,0xe2,0xe1,0x69,0xe2,0x10,0x09,0x05,0xff,0xf0, + 0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x9a,0xe2,0x91,0x11, + 0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88,0x00,0x05, + 0xff,0xe5,0xac,0xbe,0x00,0xe3,0xe0,0xe2,0xd2,0x14,0xe1,0xaf,0xe2,0x10,0x08,0x05, + 0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0xbb,0xe2,0x10, + 0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1,0xd5,0xd0, + 0x6a,0xcf,0x86,0xe5,0x10,0xe8,0xd4,0x19,0xe3,0x49,0xe7,0xe2,0x27,0xe7,0xe1,0x16, + 0xe7,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7,0x00,0xd3, + 
0x18,0xe2,0x93,0xe7,0xe1,0x82,0xe7,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd,0x9e,0x00, + 0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0xab,0xe7,0x10,0x08,0x05,0xff, + 0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08,0x05,0xff, + 0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05,0xff,0xe7, + 0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0xad,0xe9,0xd4,0x1a, + 0xe3,0xe5,0xe8,0xe2,0xcb,0xe8,0xe1,0xb8,0xe8,0x10,0x08,0x05,0xff,0xe7,0x9b,0xb4, + 0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x2d,0xe9,0xe1,0x1b,0xe9, + 0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00,0xd2,0x13, + 0xe1,0x49,0xe9,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7,0xa9,0x80, + 0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff,0xf0,0xa5, + 0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff,0xe7,0xaa, + 0xae,0x00,0xe0,0x5f,0xec,0xcf,0x86,0xd5,0x1d,0xe4,0xd4,0xea,0xe3,0x90,0xea,0xe2, + 0x6e,0xea,0xe1,0x5d,0xea,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00,0x05,0xff, + 0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0x7b,0xeb,0xe2,0x57,0xeb,0xe1,0x46,0xeb,0x10, + 0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3,0x18,0xe2, + 0xc6,0xeb,0xe1,0xb5,0xeb,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00,0x05,0xff, + 0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0xde,0xeb,0x10,0x08,0x05,0xff,0xe8,0x9a, + 0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff,0xe8,0x9c, + 0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e,0x86,0x00, + 0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* nfdicf_30200 */ - 0xd7,0x07,0x66,0x84,0x05,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x96,0x13,0xe3,0x60,0x0e, - 0xe2,0x49,0x07,0xc1,0xe0,0x4b,0x06,0xcf,0x86,0x65,0x2d,0x06,0x01,0x00,0xd4,0x2a, - 0xe3,0xce,0x35,0xe2,0x02,0x9c,0xe1,0xca,0x2e,0xe0,0x28,0x1b,0xcf,0x86,0xc5,0xe4, - 0xf9,0x65,0xe3,0x44,0x61,0xe2,0xda,0x5e,0xe1,0x0d,0x5e,0xe0,0xd2,0x5d,0xcf,0x86, - 0xe5,0x97,0x5d,0x64,0x7a,0x5d,0x0b,0x00,0x83,0xe2,0xf6,0xf2,0xe1,0xe0,0xef,0xe0, - 0x5d,0xee,0xcf,0x86,0xd5,0x31,0xc4,0xe3,0xdf,0x47,0xe2,0x80,0x46,0xe1,0x97,0xc5, - 0xe0,0x27,0x45,0xcf,0x86,0xe5,0x19,0x43,0xe4,0x3a,0x40,0xe3,0x04,0xb6,0xe2,0x5b, - 0xb5,0xe1,0x36,0xb5,0xe0,0x0f,0xb5,0xcf,0x86,0xe5,0xdc,0xb4,0x94,0x07,0x63,0xc7, - 0xb4,0x07,0x00,0x07,0x00,0xe4,0xc9,0xed,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00, - 0xd2,0x0b,0xe1,0xd8,0xda,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0xc7,0xdb, - 0xcf,0x86,0xe5,0x8c,0xdb,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xc7,0xdb, - 0xcf,0x06,0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x62,0xed,0xe3, - 0x4b,0xec,0xd2,0xa0,0xe1,0x01,0xe0,0xd0,0x21,0xcf,0x86,0xe5,0x02,0xdd,0xe4,0x7e, - 0xdc,0xe3,0x3c,0xdc,0xe2,0x1b,0xdc,0xe1,0x09,0xdc,0x10,0x08,0x05,0xff,0xe4,0xb8, - 0xbd,0x00,0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0x5e,0xde,0xe3, - 0x1d,0xde,0xe2,0xfc,0xdd,0xe1,0xeb,0xdd,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00, - 0x05,0xff,0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0xe5,0xde,0xe1,0xd4,0xde, + 0xd7,0x07,0x66,0x84,0x05,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x99,0x13,0xe3,0x63,0x0e, + 0xe2,0x4c,0x07,0xc1,0xe0,0x4e,0x06,0xcf,0x86,0x65,0x2d,0x06,0x01,0x00,0xd4,0x2a, + 
0xe3,0xd0,0x35,0xe2,0x88,0x9c,0xe1,0xcd,0x2e,0xe0,0x2b,0x1b,0xcf,0x86,0xc5,0xe4, + 0x14,0x66,0xe3,0x5f,0x61,0xe2,0xf5,0x5e,0xe1,0x28,0x5e,0xe0,0xed,0x5d,0xcf,0x86, + 0xe5,0xb2,0x5d,0x64,0x95,0x5d,0x0b,0x00,0x83,0xe2,0xa7,0xf3,0xe1,0x80,0xf0,0xe0, + 0xfd,0xee,0xcf,0x86,0xd5,0x31,0xc4,0xe3,0xe2,0x47,0xe2,0x83,0x46,0xe1,0x32,0xc6, + 0xe0,0x2a,0x45,0xcf,0x86,0xe5,0x1c,0x43,0xe4,0x3d,0x40,0xe3,0x9f,0xb6,0xe2,0xf6, + 0xb5,0xe1,0xd1,0xb5,0xe0,0xaa,0xb5,0xcf,0x86,0xe5,0x77,0xb5,0x94,0x07,0x63,0x62, + 0xb5,0x07,0x00,0x07,0x00,0xe4,0x69,0xee,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00, + 0xd2,0x0b,0xe1,0x78,0xdb,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x67,0xdc, + 0xcf,0x86,0xe5,0x2c,0xdc,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x67,0xdc, + 0xcf,0x06,0x13,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x02,0xee,0xe3, + 0xeb,0xec,0xd2,0xa0,0xe1,0xa1,0xe0,0xd0,0x21,0xcf,0x86,0xe5,0xa2,0xdd,0xe4,0x1e, + 0xdd,0xe3,0xdc,0xdc,0xe2,0xbb,0xdc,0xe1,0xa9,0xdc,0x10,0x08,0x05,0xff,0xe4,0xb8, + 0xbd,0x00,0x05,0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xfe,0xde,0xe3, + 0xbd,0xde,0xe2,0x9c,0xde,0xe1,0x8b,0xde,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00, + 0x05,0xff,0xe5,0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0x85,0xdf,0xe1,0x74,0xdf, 0x10,0x09,0x05,0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00, - 0xe2,0x05,0xdf,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff, - 0xe5,0xac,0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0x4b,0xdf,0xd2,0x14,0xe1, - 0x1a,0xdf,0x10,0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98, - 0x00,0xe1,0x26,0xdf,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0, - 0xa2,0x00,0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x7b,0xe4,0xd4,0x19,0xe3,0xb4,0xe3, - 0xe2,0x92,0xe3,0xe1,0x81,0xe3,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff, - 0xe6,0xb5,0xb7,0x00,0xd3,0x18,0xe2,0xfe,0xe3,0xe1,0xed,0xe3,0x10,0x09,0x05,0xff, - 0xf0,0xa3,0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x16, + 0xe2,0xa5,0xdf,0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff, + 0xe5,0xac,0x88,0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0xeb,0xdf,0xd2,0x14,0xe1, + 0xba,0xdf,0x10,0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98, + 0x00,0xe1,0xc6,0xdf,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0, + 0xa2,0x00,0xd1,0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x1b,0xe5,0xd4,0x19,0xe3,0x54,0xe4, + 0xe2,0x32,0xe4,0xe1,0x21,0xe4,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff, + 0xe6,0xb5,0xb7,0x00,0xd3,0x18,0xe2,0x9e,0xe4,0xe1,0x8d,0xe4,0x10,0x09,0x05,0xff, + 0xf0,0xa3,0xbd,0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0xb6, 0xe4,0x10,0x08,0x05,0xff,0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1, 0x11,0x10,0x08,0x05,0xff,0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00, 0x10,0x08,0x05,0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86, - 0xe5,0x18,0xe6,0xd4,0x1a,0xe3,0x50,0xe5,0xe2,0x36,0xe5,0xe1,0x23,0xe5,0x10,0x08, + 0xe5,0xb8,0xe6,0xd4,0x1a,0xe3,0xf0,0xe5,0xe2,0xd6,0xe5,0xe1,0xc3,0xe5,0x10,0x08, 0x05,0xff,0xe7,0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2, - 0x98,0xe5,0xe1,0x86,0xe5,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4, - 0x83,0xa3,0x00,0xd2,0x13,0xe1,0xb4,0xe5,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00, + 0x38,0xe6,0xe1,0x26,0xe6,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4, + 0x83,0xa3,0x00,0xd2,0x13,0xe1,0x54,0xe6,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00, 0x05,0xff,0xe7,0xa9,0x80,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc, 
0x00,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7, - 0x00,0x05,0xff,0xe7,0xaa,0xae,0x00,0xe0,0xca,0xe8,0xcf,0x86,0xd5,0x1d,0xe4,0x3f, - 0xe7,0xe3,0xfb,0xe6,0xe2,0xd9,0xe6,0xe1,0xc8,0xe6,0x10,0x09,0x05,0xff,0xf0,0xa3, - 0x8d,0x9f,0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0xe6,0xe7,0xe2,0xc2, - 0xe7,0xe1,0xb1,0xe7,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f, - 0x8a,0x00,0xd3,0x18,0xe2,0x31,0xe8,0xe1,0x20,0xe8,0x10,0x09,0x05,0xff,0xf0,0xa6, - 0xbe,0xb1,0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x49,0xe8,0x10, + 0x00,0x05,0xff,0xe7,0xaa,0xae,0x00,0xe0,0x6a,0xe9,0xcf,0x86,0xd5,0x1d,0xe4,0xdf, + 0xe7,0xe3,0x9b,0xe7,0xe2,0x79,0xe7,0xe1,0x68,0xe7,0x10,0x09,0x05,0xff,0xf0,0xa3, + 0x8d,0x9f,0x00,0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0x86,0xe8,0xe2,0x62, + 0xe8,0xe1,0x51,0xe8,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f, + 0x8a,0x00,0xd3,0x18,0xe2,0xd1,0xe8,0xe1,0xc0,0xe8,0x10,0x09,0x05,0xff,0xf0,0xa6, + 0xbe,0xb1,0x00,0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0xe9,0xe8,0x10, 0x08,0x05,0xff,0xe8,0x9a,0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10, 0x08,0x05,0xff,0xe8,0x9c,0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05, 0xff,0xe8,0x9e,0x86,0x00,0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00, /* nfdi_30200 */ - 0x57,0x04,0x01,0x00,0xc6,0xd5,0x13,0xe4,0x68,0x53,0xe3,0xa2,0x4e,0xe2,0x1b,0x49, - 0xc1,0xe0,0x47,0x47,0xcf,0x06,0x01,0x00,0xd4,0x2a,0xe3,0x99,0x99,0xe2,0x48,0x99, - 0xe1,0x50,0x87,0xe0,0xe0,0x6a,0xcf,0x86,0xc5,0xe4,0x3f,0x63,0xe3,0x8a,0x5e,0xe2, - 0x20,0x5c,0xe1,0x53,0x5b,0xe0,0x18,0x5b,0xcf,0x86,0xe5,0xdd,0x5a,0x64,0xc0,0x5a, - 0x0b,0x00,0x83,0xe2,0x3c,0xf0,0xe1,0x26,0xed,0xe0,0xa3,0xeb,0xcf,0x86,0xd5,0x31, - 0xc4,0xe3,0x23,0xc6,0xe2,0xfc,0xc3,0xe1,0xdd,0xc2,0xe0,0x6d,0xb9,0xcf,0x86,0xe5, - 0x60,0xb5,0xe4,0x59,0xb4,0xe3,0x4a,0xb3,0xe2,0xa1,0xb2,0xe1,0x7c,0xb2,0xe0,0x55, - 0xb2,0xcf,0x86,0xe5,0x22,0xb2,0x94,0x07,0x63,0x0d,0xb2,0x07,0x00,0x07,0x00,0xe4, - 0x0f,0xeb,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1,0x1e,0xd8,0xcf, - 0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0x0d,0xd9,0xcf,0x86,0xe5,0xd2,0xd8,0xcf, - 0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0x0d,0xd9,0xcf,0x06,0x13,0x00,0xcf,0x86, - 0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0xa8,0xea,0xe3,0x91,0xe9,0xd2,0xa0,0xe1,0x47, - 0xdd,0xd0,0x21,0xcf,0x86,0xe5,0x48,0xda,0xe4,0xc4,0xd9,0xe3,0x82,0xd9,0xe2,0x61, - 0xd9,0xe1,0x4f,0xd9,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05,0xff,0xe4,0xb8, - 0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0xa4,0xdb,0xe3,0x63,0xdb,0xe2,0x42,0xdb,0xe1, - 0x31,0xdb,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5,0x93,0xb6,0x00, - 0xd4,0x34,0xd3,0x18,0xe2,0x2b,0xdc,0xe1,0x1a,0xdc,0x10,0x09,0x05,0xff,0xf0,0xa1, - 0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0x4b,0xdc,0x91,0x11,0x10, - 0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88,0x00,0x05,0xff, - 0xe5,0xac,0xbe,0x00,0xe3,0x91,0xdc,0xd2,0x14,0xe1,0x60,0xdc,0x10,0x08,0x05,0xff, - 0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0x6c,0xdc,0x10,0x08, - 0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1,0xd5,0xd0,0x6a, - 0xcf,0x86,0xe5,0xc1,0xe1,0xd4,0x19,0xe3,0xfa,0xe0,0xe2,0xd8,0xe0,0xe1,0xc7,0xe0, - 0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7,0x00,0xd3,0x18, - 0xe2,0x44,0xe1,0xe1,0x33,0xe1,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd,0x9e,0x00,0x05, - 0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0x5c,0xe1,0x10,0x08,0x05,0xff,0xe7, - 
0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08,0x05,0xff,0xe7, - 0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05,0xff,0xe7,0x86, - 0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0x5e,0xe3,0xd4,0x1a,0xe3, - 0x96,0xe2,0xe2,0x7c,0xe2,0xe1,0x69,0xe2,0x10,0x08,0x05,0xff,0xe7,0x9b,0xb4,0x00, - 0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0xde,0xe2,0xe1,0xcc,0xe2,0x10, - 0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00,0xd2,0x13,0xe1, - 0xfa,0xe2,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7,0xa9,0x80,0x00, - 0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff,0xf0,0xa5,0xaa, - 0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff,0xe7,0xaa,0xae, - 0x00,0xe0,0x10,0xe6,0xcf,0x86,0xd5,0x1d,0xe4,0x85,0xe4,0xe3,0x41,0xe4,0xe2,0x1f, - 0xe4,0xe1,0x0e,0xe4,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00,0x05,0xff,0xe4, - 0x8f,0x95,0x00,0xd4,0x19,0xe3,0x2c,0xe5,0xe2,0x08,0xe5,0xe1,0xf7,0xe4,0x10,0x08, - 0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3,0x18,0xe2,0x77, - 0xe5,0xe1,0x66,0xe5,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00,0x05,0xff,0xf0, - 0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x8f,0xe5,0x10,0x08,0x05,0xff,0xe8,0x9a,0x88, - 0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff,0xe8,0x9c,0xa8, - 0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e,0x86,0x00,0x05, - 0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x57,0x04,0x01,0x00,0xc6,0xd5,0x16,0xe4,0x82,0x53,0xe3,0xbb,0x4e,0xe2,0x34,0x49, + 0xc1,0xe0,0x60,0x47,0xcf,0x86,0x65,0x44,0x47,0x01,0x00,0xd4,0x2a,0xe3,0x1c,0x9a, + 0xe2,0xcb,0x99,0xe1,0x9e,0x87,0xe0,0xf8,0x6a,0xcf,0x86,0xc5,0xe4,0x57,0x63,0xe3, + 0xa2,0x5e,0xe2,0x38,0x5c,0xe1,0x6b,0x5b,0xe0,0x30,0x5b,0xcf,0x86,0xe5,0xf5,0x5a, + 0x64,0xd8,0x5a,0x0b,0x00,0x83,0xe2,0xea,0xf0,0xe1,0xc3,0xed,0xe0,0x40,0xec,0xcf, + 0x86,0xd5,0x31,0xc4,0xe3,0xbb,0xc6,0xe2,0x94,0xc4,0xe1,0x75,0xc3,0xe0,0x05,0xba, + 0xcf,0x86,0xe5,0xf8,0xb5,0xe4,0xf1,0xb4,0xe3,0xe2,0xb3,0xe2,0x39,0xb3,0xe1,0x14, + 0xb3,0xe0,0xed,0xb2,0xcf,0x86,0xe5,0xba,0xb2,0x94,0x07,0x63,0xa5,0xb2,0x07,0x00, + 0x07,0x00,0xe4,0xac,0xeb,0xd3,0x08,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd2,0x0b,0xe1, + 0xbb,0xd8,0xcf,0x86,0xcf,0x06,0x05,0x00,0xd1,0x0e,0xe0,0xaa,0xd9,0xcf,0x86,0xe5, + 0x6f,0xd9,0xcf,0x06,0x11,0x00,0xd0,0x0b,0xcf,0x86,0xe5,0xaa,0xd9,0xcf,0x06,0x13, + 0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x45,0xeb,0xe3,0x2e,0xea,0xd2, + 0xa0,0xe1,0xe4,0xdd,0xd0,0x21,0xcf,0x86,0xe5,0xe5,0xda,0xe4,0x61,0xda,0xe3,0x1f, + 0xda,0xe2,0xfe,0xd9,0xe1,0xec,0xd9,0x10,0x08,0x05,0xff,0xe4,0xb8,0xbd,0x00,0x05, + 0xff,0xe4,0xb8,0xb8,0x00,0xcf,0x86,0xd5,0x1c,0xe4,0x41,0xdc,0xe3,0x00,0xdc,0xe2, + 0xdf,0xdb,0xe1,0xce,0xdb,0x10,0x08,0x05,0xff,0xe5,0x92,0xa2,0x00,0x05,0xff,0xe5, + 0x93,0xb6,0x00,0xd4,0x34,0xd3,0x18,0xe2,0xc8,0xdc,0xe1,0xb7,0xdc,0x10,0x09,0x05, + 0xff,0xf0,0xa1,0x9a,0xa8,0x00,0x05,0xff,0xf0,0xa1,0x9b,0xaa,0x00,0xe2,0xe8,0xdc, + 0x91,0x11,0x10,0x09,0x05,0xff,0xf0,0xa1,0x8d,0xaa,0x00,0x05,0xff,0xe5,0xac,0x88, + 0x00,0x05,0xff,0xe5,0xac,0xbe,0x00,0xe3,0x2e,0xdd,0xd2,0x14,0xe1,0xfd,0xdc,0x10, + 0x08,0x05,0xff,0xe5,0xaf,0xb3,0x00,0x05,0xff,0xf0,0xa1,0xac,0x98,0x00,0xe1,0x09, + 0xdd,0x10,0x08,0x05,0xff,0xe5,0xbc,0xb3,0x00,0x05,0xff,0xe5,0xb0,0xa2,0x00,0xd1, + 0xd5,0xd0,0x6a,0xcf,0x86,0xe5,0x5e,0xe2,0xd4,0x19,0xe3,0x97,0xe1,0xe2,0x75,0xe1, + 0xe1,0x64,0xe1,0x10,0x08,0x05,0xff,0xe6,0xb4,0xbe,0x00,0x05,0xff,0xe6,0xb5,0xb7, + 
0x00,0xd3,0x18,0xe2,0xe1,0xe1,0xe1,0xd0,0xe1,0x10,0x09,0x05,0xff,0xf0,0xa3,0xbd, + 0x9e,0x00,0x05,0xff,0xf0,0xa3,0xbe,0x8e,0x00,0xd2,0x13,0xe1,0xf9,0xe1,0x10,0x08, + 0x05,0xff,0xe7,0x81,0xbd,0x00,0x05,0xff,0xe7,0x81,0xb7,0x00,0xd1,0x11,0x10,0x08, + 0x05,0xff,0xe7,0x85,0x85,0x00,0x05,0xff,0xf0,0xa4,0x89,0xa3,0x00,0x10,0x08,0x05, + 0xff,0xe7,0x86,0x9c,0x00,0x05,0xff,0xe4,0x8e,0xab,0x00,0xcf,0x86,0xe5,0xfb,0xe3, + 0xd4,0x1a,0xe3,0x33,0xe3,0xe2,0x19,0xe3,0xe1,0x06,0xe3,0x10,0x08,0x05,0xff,0xe7, + 0x9b,0xb4,0x00,0x05,0xff,0xf0,0xa5,0x83,0xb3,0x00,0xd3,0x16,0xe2,0x7b,0xe3,0xe1, + 0x69,0xe3,0x10,0x08,0x05,0xff,0xe7,0xa3,0x8c,0x00,0x05,0xff,0xe4,0x83,0xa3,0x00, + 0xd2,0x13,0xe1,0x97,0xe3,0x10,0x08,0x05,0xff,0xe4,0x84,0xaf,0x00,0x05,0xff,0xe7, + 0xa9,0x80,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0xa5,0xa5,0xbc,0x00,0x05,0xff, + 0xf0,0xa5,0xaa,0xa7,0x00,0x10,0x09,0x05,0xff,0xf0,0xa5,0xaa,0xa7,0x00,0x05,0xff, + 0xe7,0xaa,0xae,0x00,0xe0,0xad,0xe6,0xcf,0x86,0xd5,0x1d,0xe4,0x22,0xe5,0xe3,0xde, + 0xe4,0xe2,0xbc,0xe4,0xe1,0xab,0xe4,0x10,0x09,0x05,0xff,0xf0,0xa3,0x8d,0x9f,0x00, + 0x05,0xff,0xe4,0x8f,0x95,0x00,0xd4,0x19,0xe3,0xc9,0xe5,0xe2,0xa5,0xe5,0xe1,0x94, + 0xe5,0x10,0x08,0x05,0xff,0xe8,0x8d,0x93,0x00,0x05,0xff,0xe8,0x8f,0x8a,0x00,0xd3, + 0x18,0xe2,0x14,0xe6,0xe1,0x03,0xe6,0x10,0x09,0x05,0xff,0xf0,0xa6,0xbe,0xb1,0x00, + 0x05,0xff,0xf0,0xa7,0x83,0x92,0x00,0xd2,0x13,0xe1,0x2c,0xe6,0x10,0x08,0x05,0xff, + 0xe8,0x9a,0x88,0x00,0x05,0xff,0xe8,0x9c,0x8e,0x00,0xd1,0x10,0x10,0x08,0x05,0xff, + 0xe8,0x9c,0xa8,0x00,0x05,0xff,0xe8,0x9d,0xab,0x00,0x10,0x08,0x05,0xff,0xe8,0x9e, + 0x86,0x00,0x05,0xff,0xe4,0xb5,0x97,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* nfdicf_c0100 */ 0xd7,0xb0,0x56,0x04,0x01,0x00,0x95,0xa8,0xd4,0x5e,0xd3,0x2e,0xd2,0x16,0xd1,0x0a, 0x10,0x04,0x01,0x00,0x01,0xff,0x61,0x00,0x10,0x06,0x01,0xff,0x62,0x00,0x01,0xff, @@ -299,3174 +299,3184 @@ static const unsigned char utf8data[64080] = { 0xd1,0x0c,0x10,0x06,0x01,0xff,0x74,0x00,0x01,0xff,0x75,0x00,0x10,0x06,0x01,0xff, 0x76,0x00,0x01,0xff,0x77,0x00,0x92,0x16,0xd1,0x0c,0x10,0x06,0x01,0xff,0x78,0x00, 0x01,0xff,0x79,0x00,0x10,0x06,0x01,0xff,0x7a,0x00,0x01,0x00,0x01,0x00,0x01,0x00, - 0xc6,0xe5,0xf6,0x14,0xe4,0x6c,0x0d,0xe3,0x36,0x08,0xe2,0x1f,0x01,0xc1,0xd0,0x21, - 0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x13,0x52,0x04,0x01,0x00, - 0x91,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xce,0xbc,0x00,0x01,0x00,0x01,0x00,0xcf, - 0x86,0xe5,0x9d,0x44,0xd4,0x7f,0xd3,0x3f,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x61,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x61,0xcc, - 0x82,0x00,0x01,0xff,0x61,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc, - 0x88,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x10,0x07,0x01,0xff,0xc3,0xa6,0x00,0x01, + 0xc6,0xe5,0xf9,0x14,0xe4,0x6f,0x0d,0xe3,0x39,0x08,0xe2,0x22,0x01,0xc1,0xd0,0x24, + 0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x07,0x63,0xd8,0x43,0x01,0x00,0x93,0x13,0x52, + 0x04,0x01,0x00,0x91,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xce,0xbc,0x00,0x01,0x00, + 0x01,0x00,0xcf,0x86,0xe5,0xb3,0x44,0xd4,0x7f,0xd3,0x3f,0xd2,0x20,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x61,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x81,0x00,0x10,0x08,0x01, + 0xff,0x61,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01, + 0xff,0x61,0xcc,0x88,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x10,0x07,0x01,0xff,0xc3, + 0xa6,0x00,0x01,0xff,0x63,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, + 0x65,0xcc,0x80,0x00,0x01,0xff,0x65,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x65,0xcc, + 
0x82,0x00,0x01,0xff,0x65,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc, + 0x80,0x00,0x01,0xff,0x69,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0x82,0x00, + 0x01,0xff,0x69,0xcc,0x88,0x00,0xd3,0x3b,0xd2,0x1f,0xd1,0x0f,0x10,0x07,0x01,0xff, + 0xc3,0xb0,0x00,0x01,0xff,0x6e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x80, + 0x00,0x01,0xff,0x6f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x82, + 0x00,0x01,0xff,0x6f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00,0x01, + 0x00,0xd2,0x1f,0xd1,0x0f,0x10,0x07,0x01,0xff,0xc3,0xb8,0x00,0x01,0xff,0x75,0xcc, + 0x80,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x82,0x00, + 0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x88,0x00,0x01,0xff,0x79,0xcc,0x81,0x00, + 0x10,0x07,0x01,0xff,0xc3,0xbe,0x00,0x01,0xff,0x73,0x73,0x00,0xe1,0xd4,0x03,0xe0, + 0xeb,0x01,0xcf,0x86,0xd5,0xfb,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x01,0xff,0x61,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x84,0x00,0x10,0x08,0x01,0xff, + 0x61,0xcc,0x86,0x00,0x01,0xff,0x61,0xcc,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, + 0x61,0xcc,0xa8,0x00,0x01,0xff,0x61,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x63,0xcc, + 0x81,0x00,0x01,0xff,0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, + 0x63,0xcc,0x82,0x00,0x01,0xff,0x63,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x63,0xcc, + 0x87,0x00,0x01,0xff,0x63,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x63,0xcc, + 0x8c,0x00,0x01,0xff,0x63,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x8c,0x00, + 0x01,0xff,0x64,0xcc,0x8c,0x00,0xd3,0x3b,0xd2,0x1b,0xd1,0x0b,0x10,0x07,0x01,0xff, + 0xc4,0x91,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x84,0x00,0x01,0xff,0x65, + 0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x86,0x00,0x01,0xff,0x65, + 0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x87,0x00,0x01,0xff,0x65,0xcc,0x87, + 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xa8,0x00,0x01,0xff,0x65, + 0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x8c,0x00,0x01,0xff,0x65,0xcc,0x8c, + 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x82,0x00,0x01,0xff,0x67,0xcc,0x82, + 0x00,0x10,0x08,0x01,0xff,0x67,0xcc,0x86,0x00,0x01,0xff,0x67,0xcc,0x86,0x00,0xd4, + 0x7b,0xd3,0x3b,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x87,0x00,0x01, + 0xff,0x67,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x67,0xcc,0xa7,0x00,0x01,0xff,0x67, + 0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x68,0xcc,0x82,0x00,0x01,0xff,0x68, + 0xcc,0x82,0x00,0x10,0x07,0x01,0xff,0xc4,0xa7,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x01,0xff,0x69,0xcc,0x83,0x00,0x01,0xff,0x69,0xcc,0x83,0x00,0x10,0x08, + 0x01,0xff,0x69,0xcc,0x84,0x00,0x01,0xff,0x69,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08, + 0x01,0xff,0x69,0xcc,0x86,0x00,0x01,0xff,0x69,0xcc,0x86,0x00,0x10,0x08,0x01,0xff, + 0x69,0xcc,0xa8,0x00,0x01,0xff,0x69,0xcc,0xa8,0x00,0xd3,0x37,0xd2,0x17,0xd1,0x0c, + 0x10,0x08,0x01,0xff,0x69,0xcc,0x87,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc4,0xb3, + 0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6a,0xcc,0x82,0x00,0x01,0xff,0x6a, + 0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x6b,0xcc,0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7, + 0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x6c,0xcc,0x81,0x00,0x10, + 0x08,0x01,0xff,0x6c,0xcc,0x81,0x00,0x01,0xff,0x6c,0xcc,0xa7,0x00,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x6c,0xcc,0xa7,0x00,0x01,0xff,0x6c,0xcc,0x8c,0x00,0x10,0x08,0x01, + 0xff,0x6c,0xcc,0x8c,0x00,0x01,0xff,0xc5,0x80,0x00,0xcf,0x86,0xd5,0xed,0xd4,0x72, + 0xd3,0x37,0xd2,0x17,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc5,0x82,0x00,0x10, + 
0x04,0x01,0x00,0x01,0xff,0x6e,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e, + 0xcc,0x81,0x00,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa7, + 0x00,0x01,0xff,0x6e,0xcc,0x8c,0x00,0xd2,0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e, + 0xcc,0x8c,0x00,0x01,0xff,0xca,0xbc,0x6e,0x00,0x10,0x07,0x01,0xff,0xc5,0x8b,0x00, + 0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc, + 0x84,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc,0x86,0x00, + 0xd3,0x3b,0xd2,0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01,0xff, + 0x6f,0xcc,0x8b,0x00,0x10,0x07,0x01,0xff,0xc5,0x93,0x00,0x01,0x00,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x72,0xcc,0x81,0x00,0x01,0xff,0x72,0xcc,0x81,0x00,0x10,0x08,0x01, + 0xff,0x72,0xcc,0xa7,0x00,0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x72,0xcc,0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c,0x00,0x10,0x08,0x01, + 0xff,0x73,0xcc,0x81,0x00,0x01,0xff,0x73,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01, + 0xff,0x73,0xcc,0x82,0x00,0x01,0xff,0x73,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x73, + 0xcc,0xa7,0x00,0x01,0xff,0x73,0xcc,0xa7,0x00,0xd4,0x7b,0xd3,0x3b,0xd2,0x20,0xd1, + 0x10,0x10,0x08,0x01,0xff,0x73,0xcc,0x8c,0x00,0x01,0xff,0x73,0xcc,0x8c,0x00,0x10, + 0x08,0x01,0xff,0x74,0xcc,0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7,0x00,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x74,0xcc,0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c,0x00,0x10,0x07,0x01, + 0xff,0xc5,0xa7,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc, + 0x83,0x00,0x01,0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x84,0x00, + 0x01,0xff,0x75,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x86,0x00, + 0x01,0xff,0x75,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x8a,0x00,0x01,0xff, + 0x75,0xcc,0x8a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc, + 0x8b,0x00,0x01,0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa8,0x00, + 0x01,0xff,0x75,0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x82,0x00, + 0x01,0xff,0x77,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x82,0x00,0x01,0xff, + 0x79,0xcc,0x82,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc,0x88,0x00, + 0x01,0xff,0x7a,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x81,0x00,0x01,0xff, + 0x7a,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x87,0x00,0x01,0xff, + 0x7a,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00,0x01,0xff,0x73,0x00, + 0xe0,0x65,0x01,0xcf,0x86,0xd5,0xb4,0xd4,0x5a,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10, + 0x04,0x01,0x00,0x01,0xff,0xc9,0x93,0x00,0x10,0x07,0x01,0xff,0xc6,0x83,0x00,0x01, + 0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc6,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff, + 0xc9,0x94,0x00,0x01,0xff,0xc6,0x88,0x00,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x01,0x00, + 0x01,0xff,0xc9,0x96,0x00,0x10,0x07,0x01,0xff,0xc9,0x97,0x00,0x01,0xff,0xc6,0x8c, + 0x00,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xc7,0x9d,0x00,0x01,0xff,0xc9,0x99, + 0x00,0xd3,0x32,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xc9,0x9b,0x00,0x01,0xff, + 0xc6,0x92,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xc9,0xa0,0x00,0xd1,0x0b,0x10,0x07, + 0x01,0xff,0xc9,0xa3,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc9,0xa9,0x00,0x01,0xff, + 0xc9,0xa8,0x00,0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0x99,0x00,0x01,0x00, + 0x01,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xc9,0xaf,0x00,0x01,0xff,0xc9,0xb2,0x00, + 0x10,0x04,0x01,0x00,0x01,0xff,0xc9,0xb5,0x00,0xd4,0x5d,0xd3,0x34,0xd2,0x1b,0xd1, + 0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x10, + 
0x07,0x01,0xff,0xc6,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc6,0xa5, + 0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xca,0x80,0x00,0x01,0xff,0xc6,0xa8,0x00,0xd2, + 0x0f,0x91,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xca,0x83,0x00,0x01,0x00,0xd1,0x0b, + 0x10,0x07,0x01,0xff,0xc6,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xca,0x88,0x00, + 0x01,0xff,0x75,0xcc,0x9b,0x00,0xd3,0x33,0xd2,0x1d,0xd1,0x0f,0x10,0x08,0x01,0xff, + 0x75,0xcc,0x9b,0x00,0x01,0xff,0xca,0x8a,0x00,0x10,0x07,0x01,0xff,0xca,0x8b,0x00, + 0x01,0xff,0xc6,0xb4,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc6,0xb6,0x00, + 0x10,0x04,0x01,0x00,0x01,0xff,0xca,0x92,0x00,0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01, + 0xff,0xc6,0xb9,0x00,0x01,0x00,0x01,0x00,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0xbd, + 0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xd4,0xd4,0x44,0xd3,0x16,0x52,0x04,0x01, + 0x00,0x51,0x07,0x01,0xff,0xc7,0x86,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xc7,0x89, + 0x00,0xd2,0x12,0x91,0x0b,0x10,0x07,0x01,0xff,0xc7,0x89,0x00,0x01,0x00,0x01,0xff, + 0xc7,0x8c,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x61,0xcc,0x8c,0x00,0x10, + 0x08,0x01,0xff,0x61,0xcc,0x8c,0x00,0x01,0xff,0x69,0xcc,0x8c,0x00,0xd3,0x46,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x8c,0x00,0x01,0xff,0x6f,0xcc,0x8c, + 0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x8c,0x00,0xd1, + 0x12,0x10,0x08,0x01,0xff,0x75,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84, + 0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x75,0xcc,0x88, + 0xcc,0x81,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81, + 0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x8c,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88, + 0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01, + 0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x88, + 0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00,0xd4,0x87,0xd3,0x41,0xd2, + 0x26,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x87,0xcc,0x84,0x00,0x01,0xff,0x61, + 0xcc,0x87,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xc3,0xa6,0xcc,0x84,0x00,0x01,0xff, + 0xc3,0xa6,0xcc,0x84,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc7,0xa5,0x00,0x01,0x00, + 0x10,0x08,0x01,0xff,0x67,0xcc,0x8c,0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00, + 0x10,0x08,0x01,0xff,0x6f,0xcc,0xa8,0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14, + 0x10,0x0a,0x01,0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc, + 0x84,0x00,0x10,0x09,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc, + 0x8c,0x00,0xd3,0x38,0xd2,0x1a,0xd1,0x0f,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00, + 0x01,0xff,0xc7,0xb3,0x00,0x10,0x07,0x01,0xff,0xc7,0xb3,0x00,0x01,0x00,0xd1,0x10, + 0x10,0x08,0x01,0xff,0x67,0xcc,0x81,0x00,0x01,0xff,0x67,0xcc,0x81,0x00,0x10,0x07, + 0x04,0xff,0xc6,0x95,0x00,0x04,0xff,0xc6,0xbf,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08, + 0x04,0xff,0x6e,0xcc,0x80,0x00,0x04,0xff,0x6e,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff, + 0x61,0xcc,0x8a,0xcc,0x81,0x00,0x01,0xff,0x61,0xcc,0x8a,0xcc,0x81,0x00,0xd1,0x12, + 0x10,0x09,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00, + 0x10,0x09,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00, + 0xe2,0x31,0x02,0xe1,0xc3,0x44,0xe0,0xc8,0x01,0xcf,0x86,0xd5,0xfb,0xd4,0x80,0xd3, + 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x8f,0x00,0x01,0xff,0x61, + 0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x91,0x00,0x01,0xff,0x61,0xcc,0x91, + 
0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x8f,0x00,0x01,0xff,0x65,0xcc,0x8f, + 0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x91,0x00,0x01,0xff,0x65,0xcc,0x91,0x00,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x8f,0x00,0x01,0xff,0x69,0xcc,0x8f, + 0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0x91,0x00,0x01,0xff,0x69,0xcc,0x91,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x10, + 0x08,0x01,0xff,0x6f,0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc,0x91,0x00,0xd3,0x40,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x72,0xcc,0x8f,0x00,0x01,0xff,0x72,0xcc,0x8f, + 0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0x91,0x00,0x01,0xff,0x72,0xcc,0x91,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x8f,0x00,0x01,0xff,0x75,0xcc,0x8f,0x00,0x10, + 0x08,0x01,0xff,0x75,0xcc,0x91,0x00,0x01,0xff,0x75,0xcc,0x91,0x00,0xd2,0x20,0xd1, + 0x10,0x10,0x08,0x04,0xff,0x73,0xcc,0xa6,0x00,0x04,0xff,0x73,0xcc,0xa6,0x00,0x10, + 0x08,0x04,0xff,0x74,0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc,0xa6,0x00,0xd1,0x0b,0x10, + 0x07,0x04,0xff,0xc8,0x9d,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x68,0xcc,0x8c,0x00, + 0x04,0xff,0x68,0xcc,0x8c,0x00,0xd4,0x79,0xd3,0x31,0xd2,0x16,0xd1,0x0b,0x10,0x07, + 0x06,0xff,0xc6,0x9e,0x00,0x07,0x00,0x10,0x07,0x04,0xff,0xc8,0xa3,0x00,0x04,0x00, + 0xd1,0x0b,0x10,0x07,0x04,0xff,0xc8,0xa5,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x61, + 0xcc,0x87,0x00,0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04, + 0xff,0x65,0xcc,0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10,0x0a,0x04,0xff,0x6f, + 0xcc,0x88,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x88,0xcc,0x84,0x00,0xd1,0x14,0x10, + 0x0a,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84, + 0x00,0x10,0x08,0x04,0xff,0x6f,0xcc,0x87,0x00,0x04,0xff,0x6f,0xcc,0x87,0x00,0xd3, + 0x27,0xe2,0x21,0x43,0xd1,0x14,0x10,0x0a,0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00, + 0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x79,0xcc,0x84,0x00, + 0x04,0xff,0x79,0xcc,0x84,0x00,0xd2,0x13,0x51,0x04,0x08,0x00,0x10,0x08,0x08,0xff, + 0xe2,0xb1,0xa5,0x00,0x08,0xff,0xc8,0xbc,0x00,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08, + 0xff,0xc6,0x9a,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0xa6,0x00,0x08,0x00,0xcf,0x86, + 0x95,0x5f,0x94,0x5b,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,0xff, + 0xc9,0x82,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xc6,0x80,0x00,0xd1,0x0e,0x10,0x07, + 0x09,0xff,0xca,0x89,0x00,0x09,0xff,0xca,0x8c,0x00,0x10,0x07,0x09,0xff,0xc9,0x87, + 0x00,0x09,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9,0x89,0x00,0x09,0x00, + 0x10,0x07,0x09,0xff,0xc9,0x8b,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9, + 0x8d,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xc9,0x8f,0x00,0x09,0x00,0x01,0x00,0x01, + 0x00,0xd1,0x8b,0xd0,0x0c,0xcf,0x86,0xe5,0x10,0x43,0x64,0xef,0x42,0x01,0xe6,0xcf, + 0x86,0xd5,0x2a,0xe4,0x99,0x43,0xe3,0x7f,0x43,0xd2,0x11,0xe1,0x5e,0x43,0x10,0x07, + 0x01,0xff,0xcc,0x80,0x00,0x01,0xff,0xcc,0x81,0x00,0xe1,0x65,0x43,0x10,0x09,0x01, + 0xff,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0x00,0xd4,0x0f,0x93,0x0b,0x92, + 0x07,0x61,0xab,0x43,0x01,0xea,0x06,0xe6,0x06,0xe6,0xd3,0x2c,0xd2,0x16,0xd1,0x0b, + 0x10,0x07,0x0a,0xff,0xcd,0xb1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xcd,0xb3,0x00, + 0x0a,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x10,0x07,0x0a, + 0xff,0xcd,0xb7,0x00,0x0a,0x00,0xd2,0x07,0x61,0x97,0x43,0x00,0x00,0x51,0x04,0x09, + 0x00,0x10,0x06,0x01,0xff,0x3b,0x00,0x10,0xff,0xcf,0xb3,0x00,0xe0,0x31,0x01,0xcf, + 0x86,0xd5,0xd3,0xd4,0x5f,0xd3,0x21,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x01, + 
0x00,0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x81, + 0x00,0x01,0xff,0xc2,0xb7,0x00,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5, + 0xcc,0x81,0x00,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9, + 0xcc,0x81,0x00,0x00,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00, + 0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x01,0xff,0xcf,0x89,0xcc, + 0x81,0x00,0xd3,0x3c,0xd2,0x20,0xd1,0x12,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x88, + 0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0x00,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01, + 0xff,0xce,0xb3,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xb4,0x00,0x01,0xff,0xce, + 0xb5,0x00,0x10,0x07,0x01,0xff,0xce,0xb6,0x00,0x01,0xff,0xce,0xb7,0x00,0xd2,0x1c, + 0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xb8,0x00,0x01,0xff,0xce,0xb9,0x00,0x10,0x07, + 0x01,0xff,0xce,0xba,0x00,0x01,0xff,0xce,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff, + 0xce,0xbc,0x00,0x01,0xff,0xce,0xbd,0x00,0x10,0x07,0x01,0xff,0xce,0xbe,0x00,0x01, + 0xff,0xce,0xbf,0x00,0xe4,0x85,0x43,0xd3,0x35,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01, + 0xff,0xcf,0x80,0x00,0x01,0xff,0xcf,0x81,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf, + 0x83,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xcf,0x84,0x00,0x01,0xff,0xcf,0x85,0x00, + 0x10,0x07,0x01,0xff,0xcf,0x86,0x00,0x01,0xff,0xcf,0x87,0x00,0xe2,0x2b,0x43,0xd1, + 0x0e,0x10,0x07,0x01,0xff,0xcf,0x88,0x00,0x01,0xff,0xcf,0x89,0x00,0x10,0x09,0x01, + 0xff,0xce,0xb9,0xcc,0x88,0x00,0x01,0xff,0xcf,0x85,0xcc,0x88,0x00,0xcf,0x86,0xd5, + 0x94,0xd4,0x3c,0xd3,0x13,0x92,0x0f,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xcf, + 0x83,0x00,0x01,0x00,0x01,0x00,0xd2,0x07,0x61,0x3a,0x43,0x01,0x00,0xd1,0x12,0x10, + 0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x10, + 0x09,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x0a,0xff,0xcf,0x97,0x00,0xd3,0x2c,0xd2, + 0x11,0xe1,0x46,0x43,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01,0xff,0xce,0xb8,0x00, + 0xd1,0x10,0x10,0x09,0x01,0xff,0xcf,0x92,0xcc,0x88,0x00,0x01,0xff,0xcf,0x86,0x00, + 0x10,0x07,0x01,0xff,0xcf,0x80,0x00,0x04,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06, + 0xff,0xcf,0x99,0x00,0x06,0x00,0x10,0x07,0x01,0xff,0xcf,0x9b,0x00,0x04,0x00,0xd1, + 0x0b,0x10,0x07,0x01,0xff,0xcf,0x9d,0x00,0x04,0x00,0x10,0x07,0x01,0xff,0xcf,0x9f, + 0x00,0x04,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf, + 0xa1,0x00,0x04,0x00,0x10,0x07,0x01,0xff,0xcf,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10, + 0x07,0x01,0xff,0xcf,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0xa7,0x00,0x01, + 0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xa9,0x00,0x01,0x00,0x10,0x07, + 0x01,0xff,0xcf,0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xad,0x00, + 0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0xaf,0x00,0x01,0x00,0xd3,0x2b,0xd2,0x12,0x91, + 0x0e,0x10,0x07,0x01,0xff,0xce,0xba,0x00,0x01,0xff,0xcf,0x81,0x00,0x01,0x00,0xd1, + 0x0e,0x10,0x07,0x05,0xff,0xce,0xb8,0x00,0x05,0xff,0xce,0xb5,0x00,0x10,0x04,0x06, + 0x00,0x07,0xff,0xcf,0xb8,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x07,0x00,0x07,0xff, + 0xcf,0xb2,0x00,0x10,0x07,0x07,0xff,0xcf,0xbb,0x00,0x07,0x00,0xd1,0x0b,0x10,0x04, + 0x08,0x00,0x08,0xff,0xcd,0xbb,0x00,0x10,0x07,0x08,0xff,0xcd,0xbc,0x00,0x08,0xff, + 0xcd,0xbd,0x00,0xe3,0xed,0x46,0xe2,0x3d,0x05,0xe1,0x27,0x02,0xe0,0x66,0x01,0xcf, + 0x86,0xd5,0xf0,0xd4,0x7e,0xd3,0x40,0xd2,0x22,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0, + 0xb5,0xcc,0x80,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88,0x00,0x10,0x07,0x01,0xff,0xd1, + 0x92,0x00,0x01,0xff,0xd0,0xb3,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1, + 
0x94,0x00,0x01,0xff,0xd1,0x95,0x00,0x10,0x07,0x01,0xff,0xd1,0x96,0x00,0x01,0xff, + 0xd1,0x96,0xcc,0x88,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x98,0x00, + 0x01,0xff,0xd1,0x99,0x00,0x10,0x07,0x01,0xff,0xd1,0x9a,0x00,0x01,0xff,0xd1,0x9b, + 0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8, + 0xcc,0x80,0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x86,0x00,0x01,0xff,0xd1,0x9f, + 0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xb0,0x00,0x01,0xff, + 0xd0,0xb1,0x00,0x10,0x07,0x01,0xff,0xd0,0xb2,0x00,0x01,0xff,0xd0,0xb3,0x00,0xd1, + 0x0e,0x10,0x07,0x01,0xff,0xd0,0xb4,0x00,0x01,0xff,0xd0,0xb5,0x00,0x10,0x07,0x01, + 0xff,0xd0,0xb6,0x00,0x01,0xff,0xd0,0xb7,0x00,0xd2,0x1e,0xd1,0x10,0x10,0x07,0x01, + 0xff,0xd0,0xb8,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x10,0x07,0x01,0xff,0xd0, + 0xba,0x00,0x01,0xff,0xd0,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xbc,0x00, + 0x01,0xff,0xd0,0xbd,0x00,0x10,0x07,0x01,0xff,0xd0,0xbe,0x00,0x01,0xff,0xd0,0xbf, + 0x00,0xe4,0x25,0x42,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x80, + 0x00,0x01,0xff,0xd1,0x81,0x00,0x10,0x07,0x01,0xff,0xd1,0x82,0x00,0x01,0xff,0xd1, + 0x83,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x84,0x00,0x01,0xff,0xd1,0x85,0x00, + 0x10,0x07,0x01,0xff,0xd1,0x86,0x00,0x01,0xff,0xd1,0x87,0x00,0xd2,0x1c,0xd1,0x0e, + 0x10,0x07,0x01,0xff,0xd1,0x88,0x00,0x01,0xff,0xd1,0x89,0x00,0x10,0x07,0x01,0xff, + 0xd1,0x8a,0x00,0x01,0xff,0xd1,0x8b,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x8c, + 0x00,0x01,0xff,0xd1,0x8d,0x00,0x10,0x07,0x01,0xff,0xd1,0x8e,0x00,0x01,0xff,0xd1, + 0x8f,0x00,0xcf,0x86,0xd5,0x07,0x64,0xcf,0x41,0x01,0x00,0xd4,0x58,0xd3,0x2c,0xd2, + 0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa1,0x00,0x01,0x00,0x10,0x07,0x01,0xff, + 0xd1,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa5,0x00,0x01,0x00, + 0x10,0x07,0x01,0xff,0xd1,0xa7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01, + 0xff,0xd1,0xa9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xab,0x00,0x01,0x00,0xd1, + 0x0b,0x10,0x07,0x01,0xff,0xd1,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xaf, + 0x00,0x01,0x00,0xd3,0x33,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb1,0x00, + 0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xb3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01, + 0xff,0xd1,0xb5,0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb5,0xcc,0x8f,0x00,0x01, + 0xff,0xd1,0xb5,0xcc,0x8f,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb9, + 0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07, + 0x01,0xff,0xd1,0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xbf,0x00,0x01,0x00, + 0xe0,0x41,0x01,0xcf,0x86,0xd5,0x8e,0xd4,0x36,0xd3,0x11,0xe2,0x91,0x41,0xe1,0x88, + 0x41,0x10,0x07,0x01,0xff,0xd2,0x81,0x00,0x01,0x00,0xd2,0x0f,0x51,0x04,0x04,0x00, + 0x10,0x07,0x06,0xff,0xd2,0x8b,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x04,0xff,0xd2, + 0x8d,0x00,0x04,0x00,0x10,0x07,0x04,0xff,0xd2,0x8f,0x00,0x04,0x00,0xd3,0x2c,0xd2, + 0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x91,0x00,0x01,0x00,0x10,0x07,0x01,0xff, + 0xd2,0x93,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x95,0x00,0x01,0x00, + 0x10,0x07,0x01,0xff,0xd2,0x97,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01, + 0xff,0xd2,0x99,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9b,0x00,0x01,0x00,0xd1, + 0x0b,0x10,0x07,0x01,0xff,0xd2,0x9d,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9f, + 0x00,0x01,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2, + 0xa1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10, + 
0x07,0x01,0xff,0xd2,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xa7,0x00,0x01, + 0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xa9,0x00,0x01,0x00,0x10,0x07, + 0x01,0xff,0xd2,0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xad,0x00, + 0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xaf,0x00,0x01,0x00,0xd3,0x2c,0xd2,0x16,0xd1, + 0x0b,0x10,0x07,0x01,0xff,0xd2,0xb1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xb3, + 0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xb5,0x00,0x01,0x00,0x10,0x07, + 0x01,0xff,0xd2,0xb7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2, + 0xb9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10, + 0x07,0x01,0xff,0xd2,0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xbf,0x00,0x01, + 0x00,0xcf,0x86,0xd5,0xdc,0xd4,0x5a,0xd3,0x36,0xd2,0x20,0xd1,0x10,0x10,0x07,0x01, + 0xff,0xd3,0x8f,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0, + 0xb6,0xcc,0x86,0x00,0x01,0xff,0xd3,0x84,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x06, + 0xff,0xd3,0x86,0x00,0x10,0x04,0x06,0x00,0x01,0xff,0xd3,0x88,0x00,0xd2,0x16,0xd1, + 0x0b,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x8a,0x00,0x10,0x04,0x06,0x00,0x01,0xff, + 0xd3,0x8c,0x00,0xe1,0x69,0x40,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x8e,0x00,0xd3, + 0x41,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x01,0xff, + 0xd0,0xb0,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x01,0xff, + 0xd0,0xb0,0xcc,0x88,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0x95,0x00,0x01,0x00, + 0x10,0x09,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00, + 0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0x99,0x00,0x01,0x00,0x10,0x09,0x01, + 0xff,0xd3,0x99,0xcc,0x88,0x00,0x01,0xff,0xd3,0x99,0xcc,0x88,0x00,0xd1,0x12,0x10, + 0x09,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x10, + 0x09,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0xd4, + 0x82,0xd3,0x41,0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa1,0x00,0x01,0x00, + 0x10,0x09,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00, + 0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb8,0xcc, + 0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00,0x01,0xff,0xd0,0xbe,0xcc, + 0x88,0x00,0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa9,0x00,0x01,0x00,0x10, + 0x09,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1, + 0x12,0x10,0x09,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88, + 0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0x01,0xff,0xd1,0x83,0xcc,0x84, + 0x00,0xd3,0x41,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x88,0x00, + 0x01,0xff,0xd1,0x83,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00, + 0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x87,0xcc, + 0x88,0x00,0x01,0xff,0xd1,0x87,0xcc,0x88,0x00,0x10,0x07,0x08,0xff,0xd3,0xb7,0x00, + 0x08,0x00,0xd2,0x1d,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x8b,0xcc,0x88,0x00,0x01, + 0xff,0xd1,0x8b,0xcc,0x88,0x00,0x10,0x07,0x09,0xff,0xd3,0xbb,0x00,0x09,0x00,0xd1, + 0x0b,0x10,0x07,0x09,0xff,0xd3,0xbd,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xd3,0xbf, + 0x00,0x09,0x00,0xe1,0x26,0x02,0xe0,0x78,0x01,0xcf,0x86,0xd5,0xb0,0xd4,0x58,0xd3, + 0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x81,0x00,0x06,0x00,0x10,0x07, + 0x06,0xff,0xd4,0x83,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x85,0x00, + 0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x87,0x00,0x06,0x00,0xd2,0x16,0xd1,0x0b,0x10, + 
0x07,0x06,0xff,0xd4,0x89,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x8b,0x00,0x06, + 0x00,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x8d,0x00,0x06,0x00,0x10,0x07,0x06,0xff, + 0xd4,0x8f,0x00,0x06,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xd4, + 0x91,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xd4,0x93,0x00,0x09,0x00,0xd1,0x0b,0x10, + 0x07,0x0a,0xff,0xd4,0x95,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0x97,0x00,0x0a, + 0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x99,0x00,0x0a,0x00,0x10,0x07, + 0x0a,0xff,0xd4,0x9b,0x00,0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x9d,0x00, + 0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0x9f,0x00,0x0a,0x00,0xd4,0x58,0xd3,0x2c,0xd2, + 0x16,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0xa1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff, + 0xd4,0xa3,0x00,0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xd4,0xa5,0x00,0x0b,0x00, + 0x10,0x07,0x0c,0xff,0xd4,0xa7,0x00,0x0c,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x10, + 0xff,0xd4,0xa9,0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xab,0x00,0x10,0x00,0xd1, + 0x0b,0x10,0x07,0x10,0xff,0xd4,0xad,0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xaf, + 0x00,0x10,0x00,0xd3,0x35,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x00,0x00,0x01,0xff,0xd5, + 0xa1,0x00,0x10,0x07,0x01,0xff,0xd5,0xa2,0x00,0x01,0xff,0xd5,0xa3,0x00,0xd1,0x0e, + 0x10,0x07,0x01,0xff,0xd5,0xa4,0x00,0x01,0xff,0xd5,0xa5,0x00,0x10,0x07,0x01,0xff, + 0xd5,0xa6,0x00,0x01,0xff,0xd5,0xa7,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff, + 0xd5,0xa8,0x00,0x01,0xff,0xd5,0xa9,0x00,0x10,0x07,0x01,0xff,0xd5,0xaa,0x00,0x01, + 0xff,0xd5,0xab,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xac,0x00,0x01,0xff,0xd5, + 0xad,0x00,0x10,0x07,0x01,0xff,0xd5,0xae,0x00,0x01,0xff,0xd5,0xaf,0x00,0xcf,0x86, + 0xe5,0x08,0x3f,0xd4,0x70,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5, + 0xb0,0x00,0x01,0xff,0xd5,0xb1,0x00,0x10,0x07,0x01,0xff,0xd5,0xb2,0x00,0x01,0xff, + 0xd5,0xb3,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xb4,0x00,0x01,0xff,0xd5,0xb5, + 0x00,0x10,0x07,0x01,0xff,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb7,0x00,0xd2,0x1c,0xd1, + 0x0e,0x10,0x07,0x01,0xff,0xd5,0xb8,0x00,0x01,0xff,0xd5,0xb9,0x00,0x10,0x07,0x01, + 0xff,0xd5,0xba,0x00,0x01,0xff,0xd5,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5, + 0xbc,0x00,0x01,0xff,0xd5,0xbd,0x00,0x10,0x07,0x01,0xff,0xd5,0xbe,0x00,0x01,0xff, + 0xd5,0xbf,0x00,0xe3,0x87,0x3e,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd6,0x80, + 0x00,0x01,0xff,0xd6,0x81,0x00,0x10,0x07,0x01,0xff,0xd6,0x82,0x00,0x01,0xff,0xd6, + 0x83,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd6,0x84,0x00,0x01,0xff,0xd6,0x85,0x00, + 0x10,0x07,0x01,0xff,0xd6,0x86,0x00,0x00,0x00,0xe0,0x2f,0x3f,0xcf,0x86,0xe5,0xc0, + 0x3e,0xe4,0x97,0x3e,0xe3,0x76,0x3e,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10, + 0x04,0x01,0x00,0x01,0xff,0xd5,0xa5,0xd6,0x82,0x00,0xe4,0x3e,0x25,0xe3,0xc3,0x1a, + 0xe2,0x7b,0x81,0xe1,0xc0,0x13,0xd0,0x1e,0xcf,0x86,0xc5,0xe4,0x08,0x4b,0xe3,0x53, + 0x46,0xe2,0xe9,0x43,0xe1,0x1c,0x43,0xe0,0xe1,0x42,0xcf,0x86,0xe5,0xa6,0x42,0x64, + 0x89,0x42,0x0b,0x00,0xcf,0x86,0xe5,0xfa,0x01,0xe4,0x03,0x56,0xe3,0x76,0x01,0xe2, + 0x8e,0x53,0xd1,0x0c,0xe0,0xef,0x52,0xcf,0x86,0x65,0x8d,0x52,0x04,0x00,0xe0,0x0d, + 0x01,0xcf,0x86,0xd5,0x0a,0xe4,0x10,0x53,0x63,0xff,0x52,0x0a,0x00,0xd4,0x80,0xd3, + 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x80,0x00,0x01,0xff,0xe2, + 0xb4,0x81,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x82,0x00,0x01,0xff,0xe2,0xb4,0x83, + 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x84,0x00,0x01,0xff,0xe2,0xb4,0x85, + 0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x86,0x00,0x01,0xff,0xe2,0xb4,0x87,0x00,0xd2, + 
0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x88,0x00,0x01,0xff,0xe2,0xb4,0x89, + 0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x8a,0x00,0x01,0xff,0xe2,0xb4,0x8b,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x8c,0x00,0x01,0xff,0xe2,0xb4,0x8d,0x00,0x10, + 0x08,0x01,0xff,0xe2,0xb4,0x8e,0x00,0x01,0xff,0xe2,0xb4,0x8f,0x00,0xd3,0x40,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x90,0x00,0x01,0xff,0xe2,0xb4,0x91, + 0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0x92,0x00,0x01,0xff,0xe2,0xb4,0x93,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x94,0x00,0x01,0xff,0xe2,0xb4,0x95,0x00,0x10, + 0x08,0x01,0xff,0xe2,0xb4,0x96,0x00,0x01,0xff,0xe2,0xb4,0x97,0x00,0xd2,0x20,0xd1, + 0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x98,0x00,0x01,0xff,0xe2,0xb4,0x99,0x00,0x10, + 0x08,0x01,0xff,0xe2,0xb4,0x9a,0x00,0x01,0xff,0xe2,0xb4,0x9b,0x00,0xd1,0x10,0x10, + 0x08,0x01,0xff,0xe2,0xb4,0x9c,0x00,0x01,0xff,0xe2,0xb4,0x9d,0x00,0x10,0x08,0x01, + 0xff,0xe2,0xb4,0x9e,0x00,0x01,0xff,0xe2,0xb4,0x9f,0x00,0xcf,0x86,0xe5,0x42,0x52, + 0x94,0x50,0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa0,0x00, + 0x01,0xff,0xe2,0xb4,0xa1,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa2,0x00,0x01,0xff, + 0xe2,0xb4,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa4,0x00,0x01,0xff, + 0xe2,0xb4,0xa5,0x00,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xa7,0x00,0x52,0x04, + 0x00,0x00,0x91,0x0c,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xad,0x00,0x00,0x00, + 0x01,0x00,0xd2,0x1b,0xe1,0xfc,0x52,0xe0,0xad,0x52,0xcf,0x86,0x95,0x0f,0x94,0x0b, + 0x93,0x07,0x62,0x92,0x52,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xd1,0x13,0xe0, + 0xd3,0x53,0xcf,0x86,0x95,0x0a,0xe4,0xa8,0x53,0x63,0x97,0x53,0x04,0x00,0x04,0x00, + 0xd0,0x0d,0xcf,0x86,0x95,0x07,0x64,0x22,0x54,0x08,0x00,0x04,0x00,0xcf,0x86,0x55, + 0x04,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x07,0x62,0x2f,0x54,0x04,0x00,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f,0xb0,0x00,0x11,0xff,0xe1,0x8f,0xb1,0x00, + 0x10,0x08,0x11,0xff,0xe1,0x8f,0xb2,0x00,0x11,0xff,0xe1,0x8f,0xb3,0x00,0x91,0x10, + 0x10,0x08,0x11,0xff,0xe1,0x8f,0xb4,0x00,0x11,0xff,0xe1,0x8f,0xb5,0x00,0x00,0x00, + 0xd4,0x1c,0xe3,0xe0,0x56,0xe2,0x17,0x56,0xe1,0xda,0x55,0xe0,0xbb,0x55,0xcf,0x86, + 0x95,0x0a,0xe4,0xa4,0x55,0x63,0x88,0x55,0x04,0x00,0x04,0x00,0xe3,0xd2,0x01,0xe2, + 0x2b,0x5a,0xd1,0x0c,0xe0,0x4c,0x59,0xcf,0x86,0x65,0x25,0x59,0x0a,0x00,0xe0,0x9c, + 0x59,0xcf,0x86,0xd5,0xc5,0xd4,0x45,0xd3,0x31,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x12, + 0xff,0xd0,0xb2,0x00,0x12,0xff,0xd0,0xb4,0x00,0x10,0x07,0x12,0xff,0xd0,0xbe,0x00, + 0x12,0xff,0xd1,0x81,0x00,0x51,0x07,0x12,0xff,0xd1,0x82,0x00,0x10,0x07,0x12,0xff, + 0xd1,0x8a,0x00,0x12,0xff,0xd1,0xa3,0x00,0x92,0x10,0x91,0x0c,0x10,0x08,0x12,0xff, + 0xea,0x99,0x8b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x14,0xff,0xe1,0x83,0x90,0x00,0x14,0xff,0xe1,0x83,0x91,0x00,0x10,0x08, + 0x14,0xff,0xe1,0x83,0x92,0x00,0x14,0xff,0xe1,0x83,0x93,0x00,0xd1,0x10,0x10,0x08, + 0x14,0xff,0xe1,0x83,0x94,0x00,0x14,0xff,0xe1,0x83,0x95,0x00,0x10,0x08,0x14,0xff, + 0xe1,0x83,0x96,0x00,0x14,0xff,0xe1,0x83,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x14,0xff,0xe1,0x83,0x98,0x00,0x14,0xff,0xe1,0x83,0x99,0x00,0x10,0x08,0x14,0xff, + 0xe1,0x83,0x9a,0x00,0x14,0xff,0xe1,0x83,0x9b,0x00,0xd1,0x10,0x10,0x08,0x14,0xff, + 0xe1,0x83,0x9c,0x00,0x14,0xff,0xe1,0x83,0x9d,0x00,0x10,0x08,0x14,0xff,0xe1,0x83, + 0x9e,0x00,0x14,0xff,0xe1,0x83,0x9f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x14,0xff,0xe1,0x83,0xa0,0x00,0x14,0xff,0xe1,0x83,0xa1,0x00,0x10,0x08, + 
0x14,0xff,0xe1,0x83,0xa2,0x00,0x14,0xff,0xe1,0x83,0xa3,0x00,0xd1,0x10,0x10,0x08, + 0x14,0xff,0xe1,0x83,0xa4,0x00,0x14,0xff,0xe1,0x83,0xa5,0x00,0x10,0x08,0x14,0xff, + 0xe1,0x83,0xa6,0x00,0x14,0xff,0xe1,0x83,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x14,0xff,0xe1,0x83,0xa8,0x00,0x14,0xff,0xe1,0x83,0xa9,0x00,0x10,0x08,0x14,0xff, + 0xe1,0x83,0xaa,0x00,0x14,0xff,0xe1,0x83,0xab,0x00,0xd1,0x10,0x10,0x08,0x14,0xff, + 0xe1,0x83,0xac,0x00,0x14,0xff,0xe1,0x83,0xad,0x00,0x10,0x08,0x14,0xff,0xe1,0x83, + 0xae,0x00,0x14,0xff,0xe1,0x83,0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x14,0xff,0xe1,0x83,0xb0,0x00,0x14,0xff,0xe1,0x83,0xb1,0x00,0x10,0x08,0x14,0xff, + 0xe1,0x83,0xb2,0x00,0x14,0xff,0xe1,0x83,0xb3,0x00,0xd1,0x10,0x10,0x08,0x14,0xff, + 0xe1,0x83,0xb4,0x00,0x14,0xff,0xe1,0x83,0xb5,0x00,0x10,0x08,0x14,0xff,0xe1,0x83, + 0xb6,0x00,0x14,0xff,0xe1,0x83,0xb7,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x14,0xff, + 0xe1,0x83,0xb8,0x00,0x14,0xff,0xe1,0x83,0xb9,0x00,0x10,0x08,0x14,0xff,0xe1,0x83, + 0xba,0x00,0x00,0x00,0xd1,0x0c,0x10,0x04,0x00,0x00,0x14,0xff,0xe1,0x83,0xbd,0x00, + 0x10,0x08,0x14,0xff,0xe1,0x83,0xbe,0x00,0x14,0xff,0xe1,0x83,0xbf,0x00,0xe2,0x9d, + 0x08,0xe1,0x48,0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84,0xd3,0x40, + 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa5,0x00,0x01,0xff,0x61,0xcc, + 0xa5,0x00,0x10,0x08,0x01,0xff,0x62,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc,0x87,0x00, + 0xd1,0x10,0x10,0x08,0x01,0xff,0x62,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc,0xa3,0x00, + 0x10,0x08,0x01,0xff,0x62,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00,0xd2,0x24, + 0xd1,0x14,0x10,0x0a,0x01,0xff,0x63,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff,0x63,0xcc, + 0xa7,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x87,0x00,0x01,0xff,0x64,0xcc, + 0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa3,0x00,0x01,0xff,0x64,0xcc, + 0xa3,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc,0xb1,0x00, + 0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa7,0x00,0x01,0xff, + 0x64,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0xad,0x00,0x01,0xff,0x64,0xcc, + 0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff, + 0x65,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00, + 0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, + 0x65,0xcc,0xad,0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x65,0xcc, + 0xb0,0x00,0x01,0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc, + 0xa7,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08,0x01,0xff, + 0x66,0xcc,0x87,0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc,0x84,0x00, + 0x10,0x08,0x01,0xff,0x68,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00,0xd1,0x10, + 0x10,0x08,0x01,0xff,0x68,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00,0x10,0x08, + 0x01,0xff,0x68,0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x01,0xff,0x68,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00,0x10,0x08, + 0x01,0xff,0x68,0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10,0x10,0x08, + 0x01,0xff,0x69,0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a,0x01,0xff, + 0x69,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00,0xd3,0x40, + 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0x81,0x00,0x01,0xff,0x6b,0xcc, + 0x81,0x00,0x10,0x08,0x01,0xff,0x6b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc,0xa3,0x00, + 
0xd1,0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc,0xb1,0x00, + 0x10,0x08,0x01,0xff,0x6c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00,0xd2,0x24, + 0xd1,0x14,0x10,0x0a,0x01,0xff,0x6c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,0x6c,0xcc, + 0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0xb1,0x00,0x01,0xff,0x6c,0xcc, + 0xb1,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6c,0xcc,0xad,0x00,0x01,0xff,0x6c,0xcc, + 0xad,0x00,0x10,0x08,0x01,0xff,0x6d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc,0x81,0x00, + 0xcf,0x86,0xe5,0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, + 0xff,0x6d,0xcc,0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6d, + 0xcc,0xa3,0x00,0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e, + 0xcc,0x87,0x00,0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa3, + 0x00,0x01,0xff,0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e, + 0xcc,0xb1,0x00,0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xad, + 0x00,0x01,0xff,0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x83, + 0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f, + 0xcc,0x83,0xcc,0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3,0x48,0xd2, + 0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,0x6f, + 0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0x01, + 0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x70,0xcc,0x81, + 0x00,0x01,0xff,0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x70,0xcc,0x87,0x00,0x01, + 0xff,0x70,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x72,0xcc,0x87, + 0x00,0x01,0xff,0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xa3,0x00,0x01, + 0xff,0x72,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84, + 0x00,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xb1, + 0x00,0x01,0xff,0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x73,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10,0x08,0x01, + 0xff,0x73,0xcc,0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01, + 0xff,0x73,0xcc,0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87,0x00,0x10, + 0x0a,0x01,0xff,0x73,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c,0xcc,0x87, + 0x00,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x01, + 0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0x87,0x00,0x01, + 0xff,0x74,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xa3,0x00,0x01, + 0xff,0x74,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0xb1,0x00,0x01,0xff,0x74, + 0xcc,0xb1,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xad, + 0x00,0x01,0xff,0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa4,0x00,0x01, + 0xff,0x75,0xcc,0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0xb0,0x00,0x01, + 0xff,0x75,0xcc,0xb0,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xad,0x00,0x01,0xff,0x75, + 0xcc,0xad,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81, + 0x00,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x84, + 0xcc,0x88,0x00,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01, + 0xff,0x76,0xcc,0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x76, + 0xcc,0xa3,0x00,0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x11,0x02,0xcf,0x86,0xd5,0xe2, + 0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x80,0x00, + 
0x01,0xff,0x77,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x81,0x00,0x01,0xff, + 0x77,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x88,0x00,0x01,0xff, + 0x77,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x87,0x00,0x01,0xff,0x77,0xcc, + 0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0xa3,0x00,0x01,0xff, + 0x77,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x78,0xcc,0x87,0x00,0x01,0xff,0x78,0xcc, + 0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x78,0xcc,0x88,0x00,0x01,0xff,0x78,0xcc, + 0x88,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc,0x87,0x00, + 0xd3,0x33,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x82,0x00,0x01,0xff, + 0x7a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0xa3,0x00,0x01,0xff,0x7a,0xcc, + 0xa3,0x00,0xe1,0x12,0x59,0x10,0x08,0x01,0xff,0x7a,0xcc,0xb1,0x00,0x01,0xff,0x7a, + 0xcc,0xb1,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01, + 0xff,0x79,0xcc,0x8a,0x00,0x10,0x08,0x01,0xff,0x61,0xca,0xbe,0x00,0x02,0xff,0x73, + 0xcc,0x87,0x00,0x51,0x04,0x0a,0x00,0x10,0x07,0x0a,0xff,0x73,0x73,0x00,0x0a,0x00, + 0xd4,0x98,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa3,0x00, + 0x01,0xff,0x61,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x89,0x00,0x01,0xff, + 0x61,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00, + 0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc, + 0x80,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a, + 0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00, + 0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc, + 0x83,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff, + 0x61,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00, + 0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a, + 0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00, + 0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc, + 0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x83,0x00,0x01,0xff, + 0x61,0xcc,0x86,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00, + 0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, + 0x65,0xcc,0xa3,0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x65,0xcc, + 0x89,0x00,0x01,0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc, + 0x83,0x00,0x01,0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc, + 0x81,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0xcf,0x86,0xe5,0x31,0x01,0xd4, + 0x90,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80, + 0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82, + 0xcc,0x89,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01, + 0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x10, + 0x0a,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82, + 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x89,0x00,0x01,0xff,0x69, + 0xcc,0x89,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc,0xa3, + 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc,0xa3, + 0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00,0xd3, + 0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x01, + 
0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80, + 0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f, + 0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,0x01, + 0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0xd2, + 0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x6f, + 0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0x01, + 0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b, + 0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x6f, + 0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x89,0x00,0xd4,0x98,0xd3, + 0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x01, + 0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3, + 0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75, + 0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x89, + 0x00,0x01,0xff,0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75, + 0xcc,0x9b,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x81,0x00,0x10,0x0a,0x01, + 0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0xd1, + 0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc,0x9b, + 0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x75, + 0xcc,0x9b,0xcc,0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75, + 0xcc,0x9b,0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0xa3,0x00,0x10,0x08,0x01, + 0xff,0x79,0xcc,0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08,0x01, + 0xff,0x79,0xcc,0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x79, + 0xcc,0x89,0x00,0x01,0xff,0x79,0xcc,0x89,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x01, + 0xff,0x79,0xcc,0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x10,0x08,0x0a,0xff,0xe1, + 0xbb,0xbb,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbd,0x00,0x0a, + 0x00,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbf,0x00,0x0a,0x00,0xe1,0xbf,0x02,0xe0,0xa1, + 0x01,0xcf,0x86,0xd5,0xc6,0xd4,0x6c,0xd3,0x18,0xe2,0x0e,0x59,0xe1,0xf7,0x58,0x10, + 0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0x00,0xd2, + 0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1, + 0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff, + 0xce,0xb1,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc, + 0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01, + 0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82, + 0x00,0xd3,0x18,0xe2,0x4a,0x59,0xe1,0x33,0x59,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc, + 0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01, + 0xff,0xce,0xb5,0xcc,0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01, + 0xff,0xce,0xb5,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80, + 0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff, + 0xce,0xb5,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd4,0x6c,0xd3,0x18,0xe2,0x74,0x59, + 0xe1,0x5d,0x59,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7, + 0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00, + 0x01,0xff,0xce,0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc, + 
0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01, + 0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81, + 0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb7, + 0xcc,0x94,0xcd,0x82,0x00,0xd3,0x18,0xe2,0xb0,0x59,0xe1,0x99,0x59,0x10,0x09,0x01, + 0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0x00,0xd2,0x28,0xd1, + 0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94, + 0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9, + 0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc, + 0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce, + 0xb9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82,0x00,0xcf, + 0x86,0xd5,0xac,0xd4,0x5a,0xd3,0x18,0xe2,0xed,0x59,0xe1,0xd6,0x59,0x10,0x09,0x01, + 0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0xd2,0x28,0xd1, + 0x12,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94, + 0x00,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf, + 0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc, + 0x81,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x18,0xe2, + 0x17,0x5a,0xe1,0x00,0x5a,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff, + 0xcf,0x85,0xcc,0x94,0x00,0xd2,0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xcf, + 0x85,0xcc,0x94,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x80, + 0x00,0xd1,0x0f,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00, + 0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00,0xe4,0xd3,0x5a, + 0xd3,0x18,0xe2,0x52,0x5a,0xe1,0x3b,0x5a,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x93, + 0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff, + 0xcf,0x89,0xcc,0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff, + 0xcf,0x89,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0x00, + 0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xcf, + 0x89,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82, + 0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xe0,0xd9,0x02,0xcf,0x86,0xe5, + 0x91,0x01,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1, + 0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d, + 0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc, + 0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93, + 0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00, + 0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce, + 0xb1,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff, + 0xce,0xb1,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00, + 0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce, + 0xb1,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1, + 0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce, + 0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01, + 0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd3,0x64,0xd2,0x30,0xd1,0x16, + 0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc, + 
0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0xce,0xb9, + 0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d, + 0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc, + 0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82, + 0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30, + 0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce, + 0xb7,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80, + 0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a, + 0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce, + 0xb7,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93, + 0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00, + 0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93, + 0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff, + 0xcf,0x89,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc, + 0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81, + 0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d, + 0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc, + 0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89, + 0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d, + 0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc, + 0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93, + 0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00, + 0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf, + 0x89,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd3,0x49,0xd2,0x26,0xd1,0x12,0x10,0x09, + 0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1,0xcc,0x84,0x00,0x10,0x0b, + 0x01,0xff,0xce,0xb1,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xce,0xb9,0x00, + 0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10, + 0x09,0x01,0xff,0xce,0xb1,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcd,0x82,0xce,0xb9, + 0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff, + 0xce,0xb1,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01,0xff, + 0xce,0xb1,0xcc,0x81,0x00,0xe1,0xf3,0x5a,0x10,0x09,0x01,0xff,0xce,0xb1,0xce,0xb9, + 0x00,0x01,0x00,0xcf,0x86,0xd5,0xbd,0xd4,0x7e,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10, + 0x04,0x01,0x00,0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7, + 0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0xd1,0x0f,0x10,0x0b, + 0x01,0xff,0xce,0xb7,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce, + 0xb7,0xcd,0x82,0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,0xd1, + 0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81, + 0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x81, + 0x00,0xe1,0x02,0x5b,0x10,0x09,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0x01,0xff,0xe1, + 0xbe,0xbf,0xcc,0x80,0x00,0xd3,0x18,0xe2,0x28,0x5b,0xe1,0x11,0x5b,0x10,0x09,0x01, + 0xff,0xce,0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0xe2,0x4c,0x5b, + 0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc, + 
0x84,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc, + 0x81,0x00,0xd4,0x51,0xd3,0x18,0xe2,0x6f,0x5b,0xe1,0x58,0x5b,0x10,0x09,0x01,0xff, + 0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,0xd2,0x24,0xd1,0x12, + 0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00, + 0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00, + 0xe1,0x8f,0x5b,0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8, + 0xcc,0x80,0x00,0xd3,0x3b,0xd2,0x18,0x51,0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf, + 0x89,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0xd1,0x0f,0x10, + 0x0b,0x01,0xff,0xcf,0x89,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff, + 0xcf,0x89,0xcd,0x82,0x00,0x01,0xff,0xcf,0x89,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24, + 0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc, + 0x81,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc, + 0x81,0x00,0xe1,0x99,0x5b,0x10,0x09,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0x01,0xff, + 0xc2,0xb4,0x00,0xe0,0x0c,0x68,0xcf,0x86,0xe5,0x23,0x02,0xe4,0x25,0x01,0xe3,0x85, + 0x5e,0xd2,0x2a,0xe1,0x5f,0x5c,0xe0,0xdd,0x5b,0xcf,0x86,0xe5,0xbb,0x5b,0x94,0x1b, + 0xe3,0xa4,0x5b,0x92,0x14,0x91,0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01, + 0xff,0xe2,0x80,0x83,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd1,0xd6,0xd0,0x46,0xcf, + 0x86,0x55,0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01,0x00,0x51,0x04,0x01, + 0x00,0x10,0x07,0x01,0xff,0xcf,0x89,0x00,0x01,0x00,0x92,0x12,0x51,0x04,0x01,0x00, + 0x10,0x06,0x01,0xff,0x6b,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x01,0x00,0xe3,0x25, + 0x5d,0x92,0x10,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0x8e,0x00,0x01, + 0x00,0x01,0x00,0xcf,0x86,0xd5,0x0a,0xe4,0x42,0x5d,0x63,0x2d,0x5d,0x06,0x00,0x94, + 0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb0,0x00,0x01, + 0xff,0xe2,0x85,0xb1,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb2,0x00,0x01,0xff,0xe2, + 0x85,0xb3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb4,0x00,0x01,0xff,0xe2, + 0x85,0xb5,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb6,0x00,0x01,0xff,0xe2,0x85,0xb7, + 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb8,0x00,0x01,0xff,0xe2, + 0x85,0xb9,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xba,0x00,0x01,0xff,0xe2,0x85,0xbb, + 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xbc,0x00,0x01,0xff,0xe2,0x85,0xbd, + 0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xbe,0x00,0x01,0xff,0xe2,0x85,0xbf,0x00,0x01, + 0x00,0xe0,0x34,0x5d,0xcf,0x86,0xe5,0x13,0x5d,0xe4,0xf2,0x5c,0xe3,0xe1,0x5c,0xe2, + 0xd4,0x5c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0xff,0xe2,0x86,0x84,0x00, + 0xe3,0x23,0x61,0xe2,0xf0,0x60,0xd1,0x0c,0xe0,0x9d,0x60,0xcf,0x86,0x65,0x7e,0x60, + 0x01,0x00,0xd0,0x62,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x18, + 0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x90,0x00, + 0x01,0xff,0xe2,0x93,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93, + 0x92,0x00,0x01,0xff,0xe2,0x93,0x93,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x94,0x00, + 0x01,0xff,0xe2,0x93,0x95,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x96,0x00, + 0x01,0xff,0xe2,0x93,0x97,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x98,0x00,0x01,0xff, + 0xe2,0x93,0x99,0x00,0xcf,0x86,0xe5,0x57,0x60,0x94,0x80,0xd3,0x40,0xd2,0x20,0xd1, + 0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x9a,0x00,0x01,0xff,0xe2,0x93,0x9b,0x00,0x10, + 0x08,0x01,0xff,0xe2,0x93,0x9c,0x00,0x01,0xff,0xe2,0x93,0x9d,0x00,0xd1,0x10,0x10, + 
0x08,0x01,0xff,0xe2,0x93,0x9e,0x00,0x01,0xff,0xe2,0x93,0x9f,0x00,0x10,0x08,0x01, + 0xff,0xe2,0x93,0xa0,0x00,0x01,0xff,0xe2,0x93,0xa1,0x00,0xd2,0x20,0xd1,0x10,0x10, + 0x08,0x01,0xff,0xe2,0x93,0xa2,0x00,0x01,0xff,0xe2,0x93,0xa3,0x00,0x10,0x08,0x01, + 0xff,0xe2,0x93,0xa4,0x00,0x01,0xff,0xe2,0x93,0xa5,0x00,0xd1,0x10,0x10,0x08,0x01, + 0xff,0xe2,0x93,0xa6,0x00,0x01,0xff,0xe2,0x93,0xa7,0x00,0x10,0x08,0x01,0xff,0xe2, + 0x93,0xa8,0x00,0x01,0xff,0xe2,0x93,0xa9,0x00,0x01,0x00,0xd4,0x0c,0xe3,0x33,0x62, + 0xe2,0x2c,0x62,0xcf,0x06,0x04,0x00,0xe3,0x0c,0x65,0xe2,0xff,0x63,0xe1,0x2e,0x02, + 0xe0,0x84,0x01,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x08,0xff,0xe2,0xb0,0xb0,0x00,0x08,0xff,0xe2,0xb0,0xb1,0x00,0x10,0x08, + 0x08,0xff,0xe2,0xb0,0xb2,0x00,0x08,0xff,0xe2,0xb0,0xb3,0x00,0xd1,0x10,0x10,0x08, + 0x08,0xff,0xe2,0xb0,0xb4,0x00,0x08,0xff,0xe2,0xb0,0xb5,0x00,0x10,0x08,0x08,0xff, + 0xe2,0xb0,0xb6,0x00,0x08,0xff,0xe2,0xb0,0xb7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x08,0xff,0xe2,0xb0,0xb8,0x00,0x08,0xff,0xe2,0xb0,0xb9,0x00,0x10,0x08,0x08,0xff, + 0xe2,0xb0,0xba,0x00,0x08,0xff,0xe2,0xb0,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff, + 0xe2,0xb0,0xbc,0x00,0x08,0xff,0xe2,0xb0,0xbd,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0, + 0xbe,0x00,0x08,0xff,0xe2,0xb0,0xbf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x08,0xff,0xe2,0xb1,0x80,0x00,0x08,0xff,0xe2,0xb1,0x81,0x00,0x10,0x08,0x08,0xff, + 0xe2,0xb1,0x82,0x00,0x08,0xff,0xe2,0xb1,0x83,0x00,0xd1,0x10,0x10,0x08,0x08,0xff, + 0xe2,0xb1,0x84,0x00,0x08,0xff,0xe2,0xb1,0x85,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1, + 0x86,0x00,0x08,0xff,0xe2,0xb1,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff, + 0xe2,0xb1,0x88,0x00,0x08,0xff,0xe2,0xb1,0x89,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1, + 0x8a,0x00,0x08,0xff,0xe2,0xb1,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1, + 0x8c,0x00,0x08,0xff,0xe2,0xb1,0x8d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8e,0x00, + 0x08,0xff,0xe2,0xb1,0x8f,0x00,0x94,0x7c,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x08,0xff,0xe2,0xb1,0x90,0x00,0x08,0xff,0xe2,0xb1,0x91,0x00,0x10,0x08,0x08,0xff, + 0xe2,0xb1,0x92,0x00,0x08,0xff,0xe2,0xb1,0x93,0x00,0xd1,0x10,0x10,0x08,0x08,0xff, + 0xe2,0xb1,0x94,0x00,0x08,0xff,0xe2,0xb1,0x95,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1, + 0x96,0x00,0x08,0xff,0xe2,0xb1,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff, + 0xe2,0xb1,0x98,0x00,0x08,0xff,0xe2,0xb1,0x99,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1, + 0x9a,0x00,0x08,0xff,0xe2,0xb1,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1, + 0x9c,0x00,0x08,0xff,0xe2,0xb1,0x9d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9e,0x00, + 0x00,0x00,0x08,0x00,0xcf,0x86,0xd5,0x07,0x64,0xef,0x61,0x08,0x00,0xd4,0x63,0xd3, + 0x32,0xd2,0x1b,0xd1,0x0c,0x10,0x08,0x09,0xff,0xe2,0xb1,0xa1,0x00,0x09,0x00,0x10, + 0x07,0x09,0xff,0xc9,0xab,0x00,0x09,0xff,0xe1,0xb5,0xbd,0x00,0xd1,0x0b,0x10,0x07, + 0x09,0xff,0xc9,0xbd,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xa8, + 0x00,0xd2,0x18,0xd1,0x0c,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xaa,0x00,0x10, + 0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xac,0x00,0xd1,0x0b,0x10,0x04,0x09,0x00,0x0a, + 0xff,0xc9,0x91,0x00,0x10,0x07,0x0a,0xff,0xc9,0xb1,0x00,0x0a,0xff,0xc9,0x90,0x00, + 0xd3,0x27,0xd2,0x17,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xc9,0x92,0x00,0x0a,0x00,0x10, + 0x08,0x0a,0xff,0xe2,0xb1,0xb3,0x00,0x0a,0x00,0x91,0x0c,0x10,0x04,0x09,0x00,0x09, + 0xff,0xe2,0xb1,0xb6,0x00,0x09,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10, + 0x07,0x0b,0xff,0xc8,0xbf,0x00,0x0b,0xff,0xc9,0x80,0x00,0xe0,0x83,0x01,0xcf,0x86, + 
0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2, + 0x81,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x83,0x00,0x08,0x00,0xd1,0x0c, + 0x10,0x08,0x08,0xff,0xe2,0xb2,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2, + 0x87,0x00,0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x89,0x00, + 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08, + 0x08,0xff,0xe2,0xb2,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x8f,0x00, + 0x08,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x91,0x00, + 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08, + 0x08,0xff,0xe2,0xb2,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x97,0x00, + 0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x99,0x00,0x08,0x00, + 0x10,0x08,0x08,0xff,0xe2,0xb2,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff, + 0xe2,0xb2,0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x9f,0x00,0x08,0x00, + 0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa1,0x00, + 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08, + 0x08,0xff,0xe2,0xb2,0xa5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa7,0x00, + 0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa9,0x00,0x08,0x00, + 0x10,0x08,0x08,0xff,0xe2,0xb2,0xab,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff, + 0xe2,0xb2,0xad,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xaf,0x00,0x08,0x00, + 0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb1,0x00,0x08,0x00, + 0x10,0x08,0x08,0xff,0xe2,0xb2,0xb3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff, + 0xe2,0xb2,0xb5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb7,0x00,0x08,0x00, + 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb9,0x00,0x08,0x00,0x10,0x08, + 0x08,0xff,0xe2,0xb2,0xbb,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2, + 0xbd,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xbf,0x00,0x08,0x00,0xcf,0x86, + 0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3, + 0x81,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x83,0x00,0x08,0x00,0xd1,0x0c, + 0x10,0x08,0x08,0xff,0xe2,0xb3,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3, + 0x87,0x00,0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x89,0x00, + 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08, + 0x08,0xff,0xe2,0xb3,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x8f,0x00, + 0x08,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x91,0x00, + 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08, + 0x08,0xff,0xe2,0xb3,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x97,0x00, + 0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x99,0x00,0x08,0x00, + 0x10,0x08,0x08,0xff,0xe2,0xb3,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff, + 0xe2,0xb3,0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x9f,0x00,0x08,0x00, + 0xd4,0x3b,0xd3,0x1c,0x92,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0xa1,0x00, + 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0xa3,0x00,0x08,0x00,0x08,0x00,0xd2,0x10, + 0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x0b,0xff,0xe2,0xb3,0xac,0x00,0xe1,0x3b, + 0x5f,0x10,0x04,0x0b,0x00,0x0b,0xff,0xe2,0xb3,0xae,0x00,0xe3,0x40,0x5f,0x92,0x10, + 0x51,0x04,0x0b,0xe6,0x10,0x08,0x0d,0xff,0xe2,0xb3,0xb3,0x00,0x0d,0x00,0x00,0x00, + 0xe2,0x98,0x08,0xd1,0x0b,0xe0,0x11,0x67,0xcf,0x86,0xcf,0x06,0x01,0x00,0xe0,0x65, + 
0x6c,0xcf,0x86,0xe5,0xa7,0x05,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x0c,0xe2,0xf8, + 0x67,0xe1,0x8f,0x67,0xcf,0x06,0x04,0x00,0xe2,0xdb,0x01,0xe1,0x26,0x01,0xd0,0x09, + 0xcf,0x86,0x65,0xf4,0x67,0x0a,0x00,0xcf,0x86,0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2, + 0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a, + 0xff,0xea,0x99,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x85, + 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1, + 0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea, + 0x99,0x8b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x8d,0x00,0x0a, + 0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1, + 0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea, + 0x99,0x93,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x95,0x00,0x0a, + 0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x97,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10, + 0x08,0x0a,0xff,0xea,0x99,0x99,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x9b, + 0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x9d,0x00,0x0a,0x00,0x10, + 0x08,0x0a,0xff,0xea,0x99,0x9f,0x00,0x0a,0x00,0xe4,0x5d,0x67,0xd3,0x30,0xd2,0x18, + 0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x99,0xa1,0x00,0x0c,0x00,0x10,0x08,0x0a,0xff, + 0xea,0x99,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0xa5,0x00, + 0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c, + 0x10,0x08,0x0a,0xff,0xea,0x99,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99, + 0xab,0x00,0x0a,0x00,0xe1,0x0c,0x67,0x10,0x08,0x0a,0xff,0xea,0x99,0xad,0x00,0x0a, + 0x00,0xe0,0x35,0x67,0xcf,0x86,0x95,0xab,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c, + 0x10,0x08,0x0a,0xff,0xea,0x9a,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a, + 0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x85,0x00,0x0a,0x00, + 0x10,0x08,0x0a,0xff,0xea,0x9a,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08, + 0x0a,0xff,0xea,0x9a,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8b,0x00, + 0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8d,0x00,0x0a,0x00,0x10,0x08, + 0x0a,0xff,0xea,0x9a,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08, + 0x0a,0xff,0xea,0x9a,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x93,0x00, + 0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x95,0x00,0x0a,0x00,0x10,0x08, + 0x0a,0xff,0xea,0x9a,0x97,0x00,0x0a,0x00,0xe2,0x92,0x66,0xd1,0x0c,0x10,0x08,0x10, + 0xff,0xea,0x9a,0x99,0x00,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9a,0x9b,0x00,0x10, + 0x00,0x0b,0x00,0xe1,0x10,0x02,0xd0,0xb9,0xcf,0x86,0xd5,0x07,0x64,0x9e,0x66,0x08, + 0x00,0xd4,0x58,0xd3,0x28,0xd2,0x10,0x51,0x04,0x09,0x00,0x10,0x08,0x0a,0xff,0xea, + 0x9c,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa5,0x00,0x0a, + 0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10, + 0x08,0x0a,0xff,0xea,0x9c,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xab, + 0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xad,0x00,0x0a,0x00,0x10, + 0x08,0x0a,0xff,0xea,0x9c,0xaf,0x00,0x0a,0x00,0xd3,0x28,0xd2,0x10,0x51,0x04,0x0a, + 0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a, + 0xff,0xea,0x9c,0xb5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb7,0x00,0x0a, + 0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb9,0x00,0x0a,0x00,0x10, + 0x08,0x0a,0xff,0xea,0x9c,0xbb,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea, + 
0x9c,0xbd,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xbf,0x00,0x0a,0x00,0xcf, + 0x86,0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea, + 0x9d,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x83,0x00,0x0a,0x00,0xd1, + 0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x85,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea, + 0x9d,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x89, + 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8b,0x00,0x0a,0x00,0xd1,0x0c,0x10, + 0x08,0x0a,0xff,0xea,0x9d,0x8d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8f, + 0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x91, + 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x93,0x00,0x0a,0x00,0xd1,0x0c,0x10, + 0x08,0x0a,0xff,0xea,0x9d,0x95,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x97, + 0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x99,0x00,0x0a, + 0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x9b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a, + 0xff,0xea,0x9d,0x9d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x9f,0x00,0x0a, + 0x00,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa1, + 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10, + 0x08,0x0a,0xff,0xea,0x9d,0xa5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa7, + 0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa9,0x00,0x0a, + 0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xab,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a, + 0xff,0xea,0x9d,0xad,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xaf,0x00,0x0a, + 0x00,0x53,0x04,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea, + 0x9d,0xba,0x00,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9d,0xbc,0x00,0xd1,0x0c,0x10, + 0x04,0x0a,0x00,0x0a,0xff,0xe1,0xb5,0xb9,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xbf, + 0x00,0x0a,0x00,0xe0,0x71,0x01,0xcf,0x86,0xd5,0xa6,0xd4,0x4e,0xd3,0x30,0xd2,0x18, + 0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9e,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff, + 0xea,0x9e,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9e,0x85,0x00, + 0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9e,0x87,0x00,0x0a,0x00,0xd2,0x10,0x51,0x04, + 0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9e,0x8c,0x00,0xe1,0x9a,0x64,0x10, + 0x04,0x0a,0x00,0x0c,0xff,0xc9,0xa5,0x00,0xd3,0x28,0xd2,0x18,0xd1,0x0c,0x10,0x08, + 0x0c,0xff,0xea,0x9e,0x91,0x00,0x0c,0x00,0x10,0x08,0x0d,0xff,0xea,0x9e,0x93,0x00, + 0x0d,0x00,0x51,0x04,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x97,0x00,0x10,0x00, + 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e,0x99,0x00,0x10,0x00,0x10,0x08, + 0x10,0xff,0xea,0x9e,0x9b,0x00,0x10,0x00,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e, + 0x9d,0x00,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x9f,0x00,0x10,0x00,0xd4,0x63, + 0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa1,0x00,0x0c,0x00, + 0x10,0x08,0x0c,0xff,0xea,0x9e,0xa3,0x00,0x0c,0x00,0xd1,0x0c,0x10,0x08,0x0c,0xff, + 0xea,0x9e,0xa5,0x00,0x0c,0x00,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa7,0x00,0x0c,0x00, + 0xd2,0x1a,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa9,0x00,0x0c,0x00,0x10,0x07, + 0x0d,0xff,0xc9,0xa6,0x00,0x10,0xff,0xc9,0x9c,0x00,0xd1,0x0e,0x10,0x07,0x10,0xff, + 0xc9,0xa1,0x00,0x10,0xff,0xc9,0xac,0x00,0x10,0x07,0x12,0xff,0xc9,0xaa,0x00,0x14, + 0x00,0xd3,0x35,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x10,0xff,0xca,0x9e,0x00,0x10,0xff, + 0xca,0x87,0x00,0x10,0x07,0x11,0xff,0xca,0x9d,0x00,0x11,0xff,0xea,0xad,0x93,0x00, + 0xd1,0x0c,0x10,0x08,0x11,0xff,0xea,0x9e,0xb5,0x00,0x11,0x00,0x10,0x08,0x11,0xff, + 
0xea,0x9e,0xb7,0x00,0x11,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x14,0xff,0xea,0x9e, + 0xb9,0x00,0x14,0x00,0x10,0x08,0x15,0xff,0xea,0x9e,0xbb,0x00,0x15,0x00,0xd1,0x0c, + 0x10,0x08,0x15,0xff,0xea,0x9e,0xbd,0x00,0x15,0x00,0x10,0x08,0x15,0xff,0xea,0x9e, + 0xbf,0x00,0x15,0x00,0xcf,0x86,0xe5,0xd4,0x63,0x94,0x2f,0x93,0x2b,0xd2,0x10,0x51, + 0x04,0x00,0x00,0x10,0x08,0x15,0xff,0xea,0x9f,0x83,0x00,0x15,0x00,0xd1,0x0f,0x10, + 0x08,0x15,0xff,0xea,0x9e,0x94,0x00,0x15,0xff,0xca,0x82,0x00,0x10,0x08,0x15,0xff, + 0xe1,0xb6,0x8e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe4,0xb4,0x66,0xd3,0x1d,0xe2, + 0x5b,0x64,0xe1,0x0a,0x64,0xe0,0xf7,0x63,0xcf,0x86,0xe5,0xd8,0x63,0x94,0x0b,0x93, + 0x07,0x62,0xc3,0x63,0x08,0x00,0x08,0x00,0x08,0x00,0xd2,0x0f,0xe1,0x5a,0x65,0xe0, + 0x27,0x65,0xcf,0x86,0x65,0x0c,0x65,0x0a,0x00,0xd1,0xab,0xd0,0x1a,0xcf,0x86,0xe5, + 0x17,0x66,0xe4,0xfa,0x65,0xe3,0xe1,0x65,0xe2,0xd4,0x65,0x91,0x08,0x10,0x04,0x00, + 0x00,0x0c,0x00,0x0c,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x0b,0x93,0x07,0x62, + 0x27,0x66,0x11,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff, + 0xe1,0x8e,0xa0,0x00,0x11,0xff,0xe1,0x8e,0xa1,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e, + 0xa2,0x00,0x11,0xff,0xe1,0x8e,0xa3,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e, + 0xa4,0x00,0x11,0xff,0xe1,0x8e,0xa5,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa6,0x00, + 0x11,0xff,0xe1,0x8e,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e, + 0xa8,0x00,0x11,0xff,0xe1,0x8e,0xa9,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xaa,0x00, + 0x11,0xff,0xe1,0x8e,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xac,0x00, + 0x11,0xff,0xe1,0x8e,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xae,0x00,0x11,0xff, + 0xe1,0x8e,0xaf,0x00,0xe0,0xb2,0x65,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40, + 0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb0,0x00,0x11,0xff,0xe1,0x8e, + 0xb1,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb2,0x00,0x11,0xff,0xe1,0x8e,0xb3,0x00, + 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb4,0x00,0x11,0xff,0xe1,0x8e,0xb5,0x00, + 0x10,0x08,0x11,0xff,0xe1,0x8e,0xb6,0x00,0x11,0xff,0xe1,0x8e,0xb7,0x00,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb8,0x00,0x11,0xff,0xe1,0x8e,0xb9,0x00, + 0x10,0x08,0x11,0xff,0xe1,0x8e,0xba,0x00,0x11,0xff,0xe1,0x8e,0xbb,0x00,0xd1,0x10, + 0x10,0x08,0x11,0xff,0xe1,0x8e,0xbc,0x00,0x11,0xff,0xe1,0x8e,0xbd,0x00,0x10,0x08, + 0x11,0xff,0xe1,0x8e,0xbe,0x00,0x11,0xff,0xe1,0x8e,0xbf,0x00,0xd3,0x40,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f,0x80,0x00,0x11,0xff,0xe1,0x8f,0x81,0x00, + 0x10,0x08,0x11,0xff,0xe1,0x8f,0x82,0x00,0x11,0xff,0xe1,0x8f,0x83,0x00,0xd1,0x10, + 0x10,0x08,0x11,0xff,0xe1,0x8f,0x84,0x00,0x11,0xff,0xe1,0x8f,0x85,0x00,0x10,0x08, + 0x11,0xff,0xe1,0x8f,0x86,0x00,0x11,0xff,0xe1,0x8f,0x87,0x00,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x11,0xff,0xe1,0x8f,0x88,0x00,0x11,0xff,0xe1,0x8f,0x89,0x00,0x10,0x08, + 0x11,0xff,0xe1,0x8f,0x8a,0x00,0x11,0xff,0xe1,0x8f,0x8b,0x00,0xd1,0x10,0x10,0x08, + 0x11,0xff,0xe1,0x8f,0x8c,0x00,0x11,0xff,0xe1,0x8f,0x8d,0x00,0x10,0x08,0x11,0xff, + 0xe1,0x8f,0x8e,0x00,0x11,0xff,0xe1,0x8f,0x8f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f,0x90,0x00,0x11,0xff,0xe1,0x8f,0x91,0x00, + 0x10,0x08,0x11,0xff,0xe1,0x8f,0x92,0x00,0x11,0xff,0xe1,0x8f,0x93,0x00,0xd1,0x10, + 0x10,0x08,0x11,0xff,0xe1,0x8f,0x94,0x00,0x11,0xff,0xe1,0x8f,0x95,0x00,0x10,0x08, + 0x11,0xff,0xe1,0x8f,0x96,0x00,0x11,0xff,0xe1,0x8f,0x97,0x00,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x11,0xff,0xe1,0x8f,0x98,0x00,0x11,0xff,0xe1,0x8f,0x99,0x00,0x10,0x08, + 
0x11,0xff,0xe1,0x8f,0x9a,0x00,0x11,0xff,0xe1,0x8f,0x9b,0x00,0xd1,0x10,0x10,0x08, + 0x11,0xff,0xe1,0x8f,0x9c,0x00,0x11,0xff,0xe1,0x8f,0x9d,0x00,0x10,0x08,0x11,0xff, + 0xe1,0x8f,0x9e,0x00,0x11,0xff,0xe1,0x8f,0x9f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x11,0xff,0xe1,0x8f,0xa0,0x00,0x11,0xff,0xe1,0x8f,0xa1,0x00,0x10,0x08, + 0x11,0xff,0xe1,0x8f,0xa2,0x00,0x11,0xff,0xe1,0x8f,0xa3,0x00,0xd1,0x10,0x10,0x08, + 0x11,0xff,0xe1,0x8f,0xa4,0x00,0x11,0xff,0xe1,0x8f,0xa5,0x00,0x10,0x08,0x11,0xff, + 0xe1,0x8f,0xa6,0x00,0x11,0xff,0xe1,0x8f,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x11,0xff,0xe1,0x8f,0xa8,0x00,0x11,0xff,0xe1,0x8f,0xa9,0x00,0x10,0x08,0x11,0xff, + 0xe1,0x8f,0xaa,0x00,0x11,0xff,0xe1,0x8f,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff, + 0xe1,0x8f,0xac,0x00,0x11,0xff,0xe1,0x8f,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f, + 0xae,0x00,0x11,0xff,0xe1,0x8f,0xaf,0x00,0xd1,0x0c,0xe0,0xeb,0x63,0xcf,0x86,0xcf, + 0x06,0x02,0xff,0xff,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06, + 0xcf,0x06,0x01,0x00,0xd4,0xae,0xd3,0x09,0xe2,0x54,0x64,0xcf,0x06,0x01,0x00,0xd2, + 0x27,0xe1,0x1f,0x70,0xe0,0x26,0x6e,0xcf,0x86,0xe5,0x3f,0x6d,0xe4,0xce,0x6c,0xe3, + 0x99,0x6c,0xe2,0x78,0x6c,0xe1,0x67,0x6c,0x10,0x08,0x01,0xff,0xe5,0x88,0x87,0x00, + 0x01,0xff,0xe5,0xba,0xa6,0x00,0xe1,0x74,0x74,0xe0,0xe8,0x73,0xcf,0x86,0xe5,0x22, + 0x73,0xd4,0x3b,0x93,0x37,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x01,0xff,0x66,0x66,0x00, + 0x01,0xff,0x66,0x69,0x00,0x10,0x07,0x01,0xff,0x66,0x6c,0x00,0x01,0xff,0x66,0x66, + 0x69,0x00,0xd1,0x0f,0x10,0x08,0x01,0xff,0x66,0x66,0x6c,0x00,0x01,0xff,0x73,0x74, + 0x00,0x10,0x07,0x01,0xff,0x73,0x74,0x00,0x00,0x00,0x00,0x00,0xe3,0xc8,0x72,0xd2, + 0x11,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xb6,0x00, + 0xd1,0x12,0x10,0x09,0x01,0xff,0xd5,0xb4,0xd5,0xa5,0x00,0x01,0xff,0xd5,0xb4,0xd5, + 0xab,0x00,0x10,0x09,0x01,0xff,0xd5,0xbe,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb4,0xd5, + 0xad,0x00,0xd3,0x09,0xe2,0x40,0x74,0xcf,0x06,0x01,0x00,0xd2,0x13,0xe1,0x30,0x75, + 0xe0,0xc1,0x74,0xcf,0x86,0xe5,0x9e,0x74,0x64,0x8d,0x74,0x06,0xff,0x00,0xe1,0x96, + 0x75,0xe0,0x63,0x75,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x7c, + 0xd3,0x3c,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xef,0xbd,0x81,0x00, + 0x10,0x08,0x01,0xff,0xef,0xbd,0x82,0x00,0x01,0xff,0xef,0xbd,0x83,0x00,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xef,0xbd,0x84,0x00,0x01,0xff,0xef,0xbd,0x85,0x00,0x10,0x08, + 0x01,0xff,0xef,0xbd,0x86,0x00,0x01,0xff,0xef,0xbd,0x87,0x00,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xef,0xbd,0x88,0x00,0x01,0xff,0xef,0xbd,0x89,0x00,0x10,0x08, + 0x01,0xff,0xef,0xbd,0x8a,0x00,0x01,0xff,0xef,0xbd,0x8b,0x00,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xef,0xbd,0x8c,0x00,0x01,0xff,0xef,0xbd,0x8d,0x00,0x10,0x08,0x01,0xff, + 0xef,0xbd,0x8e,0x00,0x01,0xff,0xef,0xbd,0x8f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xef,0xbd,0x90,0x00,0x01,0xff,0xef,0xbd,0x91,0x00,0x10,0x08, + 0x01,0xff,0xef,0xbd,0x92,0x00,0x01,0xff,0xef,0xbd,0x93,0x00,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xef,0xbd,0x94,0x00,0x01,0xff,0xef,0xbd,0x95,0x00,0x10,0x08,0x01,0xff, + 0xef,0xbd,0x96,0x00,0x01,0xff,0xef,0xbd,0x97,0x00,0x92,0x1c,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xef,0xbd,0x98,0x00,0x01,0xff,0xef,0xbd,0x99,0x00,0x10,0x08,0x01,0xff, + 0xef,0xbd,0x9a,0x00,0x01,0x00,0x01,0x00,0x83,0xe2,0x87,0xb3,0xe1,0x60,0xb0,0xe0, + 0xdd,0xae,0xcf,0x86,0xe5,0x81,0x9b,0xc4,0xe3,0xc1,0x07,0xe2,0x62,0x06,0xe1,0x11, + 
0x86,0xe0,0x09,0x05,0xcf,0x86,0xe5,0xfb,0x02,0xd4,0x1c,0xe3,0x7f,0x76,0xe2,0xd6, + 0x75,0xe1,0xb1,0x75,0xe0,0x8a,0x75,0xcf,0x86,0xe5,0x57,0x75,0x94,0x07,0x63,0x42, + 0x75,0x07,0x00,0x07,0x00,0xe3,0x2b,0x78,0xe2,0xf0,0x77,0xe1,0x77,0x01,0xe0,0x88, + 0x77,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09, + 0x05,0xff,0xf0,0x90,0x90,0xa8,0x00,0x05,0xff,0xf0,0x90,0x90,0xa9,0x00,0x10,0x09, + 0x05,0xff,0xf0,0x90,0x90,0xaa,0x00,0x05,0xff,0xf0,0x90,0x90,0xab,0x00,0xd1,0x12, + 0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xac,0x00,0x05,0xff,0xf0,0x90,0x90,0xad,0x00, + 0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xae,0x00,0x05,0xff,0xf0,0x90,0x90,0xaf,0x00, + 0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb0,0x00,0x05,0xff,0xf0, + 0x90,0x90,0xb1,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb2,0x00,0x05,0xff,0xf0, + 0x90,0x90,0xb3,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb4,0x00,0x05, + 0xff,0xf0,0x90,0x90,0xb5,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb6,0x00,0x05, + 0xff,0xf0,0x90,0x90,0xb7,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff, + 0xf0,0x90,0x90,0xb8,0x00,0x05,0xff,0xf0,0x90,0x90,0xb9,0x00,0x10,0x09,0x05,0xff, + 0xf0,0x90,0x90,0xba,0x00,0x05,0xff,0xf0,0x90,0x90,0xbb,0x00,0xd1,0x12,0x10,0x09, + 0x05,0xff,0xf0,0x90,0x90,0xbc,0x00,0x05,0xff,0xf0,0x90,0x90,0xbd,0x00,0x10,0x09, + 0x05,0xff,0xf0,0x90,0x90,0xbe,0x00,0x05,0xff,0xf0,0x90,0x90,0xbf,0x00,0xd2,0x24, + 0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x80,0x00,0x05,0xff,0xf0,0x90,0x91, + 0x81,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x82,0x00,0x05,0xff,0xf0,0x90,0x91, + 0x83,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x84,0x00,0x05,0xff,0xf0, + 0x90,0x91,0x85,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x86,0x00,0x05,0xff,0xf0, + 0x90,0x91,0x87,0x00,0x94,0x4c,0x93,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff, + 0xf0,0x90,0x91,0x88,0x00,0x05,0xff,0xf0,0x90,0x91,0x89,0x00,0x10,0x09,0x05,0xff, + 0xf0,0x90,0x91,0x8a,0x00,0x05,0xff,0xf0,0x90,0x91,0x8b,0x00,0xd1,0x12,0x10,0x09, + 0x05,0xff,0xf0,0x90,0x91,0x8c,0x00,0x05,0xff,0xf0,0x90,0x91,0x8d,0x00,0x10,0x09, + 0x07,0xff,0xf0,0x90,0x91,0x8e,0x00,0x07,0xff,0xf0,0x90,0x91,0x8f,0x00,0x05,0x00, + 0x05,0x00,0xd0,0xa0,0xcf,0x86,0xd5,0x07,0x64,0x30,0x76,0x07,0x00,0xd4,0x07,0x63, + 0x3d,0x76,0x07,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90, + 0x93,0x98,0x00,0x12,0xff,0xf0,0x90,0x93,0x99,0x00,0x10,0x09,0x12,0xff,0xf0,0x90, + 0x93,0x9a,0x00,0x12,0xff,0xf0,0x90,0x93,0x9b,0x00,0xd1,0x12,0x10,0x09,0x12,0xff, + 0xf0,0x90,0x93,0x9c,0x00,0x12,0xff,0xf0,0x90,0x93,0x9d,0x00,0x10,0x09,0x12,0xff, + 0xf0,0x90,0x93,0x9e,0x00,0x12,0xff,0xf0,0x90,0x93,0x9f,0x00,0xd2,0x24,0xd1,0x12, + 0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa0,0x00,0x12,0xff,0xf0,0x90,0x93,0xa1,0x00, + 0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa2,0x00,0x12,0xff,0xf0,0x90,0x93,0xa3,0x00, + 0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa4,0x00,0x12,0xff,0xf0,0x90,0x93, + 0xa5,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xa6,0x00,0x12,0xff,0xf0,0x90,0x93, + 0xa7,0x00,0xcf,0x86,0xe5,0xc6,0x75,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10, + 0x09,0x12,0xff,0xf0,0x90,0x93,0xa8,0x00,0x12,0xff,0xf0,0x90,0x93,0xa9,0x00,0x10, + 0x09,0x12,0xff,0xf0,0x90,0x93,0xaa,0x00,0x12,0xff,0xf0,0x90,0x93,0xab,0x00,0xd1, + 0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xac,0x00,0x12,0xff,0xf0,0x90,0x93,0xad, + 0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xae,0x00,0x12,0xff,0xf0,0x90,0x93,0xaf, + 0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb0,0x00,0x12,0xff, + 
0xf0,0x90,0x93,0xb1,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb2,0x00,0x12,0xff, + 0xf0,0x90,0x93,0xb3,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb4,0x00, + 0x12,0xff,0xf0,0x90,0x93,0xb5,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb6,0x00, + 0x12,0xff,0xf0,0x90,0x93,0xb7,0x00,0x93,0x28,0x92,0x24,0xd1,0x12,0x10,0x09,0x12, + 0xff,0xf0,0x90,0x93,0xb8,0x00,0x12,0xff,0xf0,0x90,0x93,0xb9,0x00,0x10,0x09,0x12, + 0xff,0xf0,0x90,0x93,0xba,0x00,0x12,0xff,0xf0,0x90,0x93,0xbb,0x00,0x00,0x00,0x12, + 0x00,0xd4,0x1f,0xe3,0xdf,0x76,0xe2,0x6a,0x76,0xe1,0x09,0x76,0xe0,0xea,0x75,0xcf, + 0x86,0xe5,0xb7,0x75,0x94,0x0a,0xe3,0xa2,0x75,0x62,0x99,0x75,0x07,0x00,0x07,0x00, + 0xe3,0xde,0x78,0xe2,0xaf,0x78,0xd1,0x09,0xe0,0x4c,0x78,0xcf,0x06,0x0b,0x00,0xe0, + 0x7f,0x78,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10, + 0x09,0x11,0xff,0xf0,0x90,0xb3,0x80,0x00,0x11,0xff,0xf0,0x90,0xb3,0x81,0x00,0x10, + 0x09,0x11,0xff,0xf0,0x90,0xb3,0x82,0x00,0x11,0xff,0xf0,0x90,0xb3,0x83,0x00,0xd1, + 0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x84,0x00,0x11,0xff,0xf0,0x90,0xb3,0x85, + 0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x86,0x00,0x11,0xff,0xf0,0x90,0xb3,0x87, + 0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x88,0x00,0x11,0xff, + 0xf0,0x90,0xb3,0x89,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8a,0x00,0x11,0xff, + 0xf0,0x90,0xb3,0x8b,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8c,0x00, + 0x11,0xff,0xf0,0x90,0xb3,0x8d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8e,0x00, + 0x11,0xff,0xf0,0x90,0xb3,0x8f,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11, + 0xff,0xf0,0x90,0xb3,0x90,0x00,0x11,0xff,0xf0,0x90,0xb3,0x91,0x00,0x10,0x09,0x11, + 0xff,0xf0,0x90,0xb3,0x92,0x00,0x11,0xff,0xf0,0x90,0xb3,0x93,0x00,0xd1,0x12,0x10, + 0x09,0x11,0xff,0xf0,0x90,0xb3,0x94,0x00,0x11,0xff,0xf0,0x90,0xb3,0x95,0x00,0x10, + 0x09,0x11,0xff,0xf0,0x90,0xb3,0x96,0x00,0x11,0xff,0xf0,0x90,0xb3,0x97,0x00,0xd2, + 0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x98,0x00,0x11,0xff,0xf0,0x90, + 0xb3,0x99,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9a,0x00,0x11,0xff,0xf0,0x90, + 0xb3,0x9b,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9c,0x00,0x11,0xff, + 0xf0,0x90,0xb3,0x9d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9e,0x00,0x11,0xff, + 0xf0,0x90,0xb3,0x9f,0x00,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11, + 0xff,0xf0,0x90,0xb3,0xa0,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa1,0x00,0x10,0x09,0x11, + 0xff,0xf0,0x90,0xb3,0xa2,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa3,0x00,0xd1,0x12,0x10, + 0x09,0x11,0xff,0xf0,0x90,0xb3,0xa4,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa5,0x00,0x10, + 0x09,0x11,0xff,0xf0,0x90,0xb3,0xa6,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa7,0x00,0xd2, + 0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xa8,0x00,0x11,0xff,0xf0,0x90, + 0xb3,0xa9,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xaa,0x00,0x11,0xff,0xf0,0x90, + 0xb3,0xab,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xac,0x00,0x11,0xff, + 0xf0,0x90,0xb3,0xad,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xae,0x00,0x11,0xff, + 0xf0,0x90,0xb3,0xaf,0x00,0x93,0x23,0x92,0x1f,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0, + 0x90,0xb3,0xb0,0x00,0x11,0xff,0xf0,0x90,0xb3,0xb1,0x00,0x10,0x09,0x11,0xff,0xf0, + 0x90,0xb3,0xb2,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x15,0xe4,0x91, + 0x7b,0xe3,0x9b,0x79,0xe2,0x94,0x78,0xe1,0xe4,0x77,0xe0,0x9d,0x77,0xcf,0x06,0x0c, + 0x00,0xe4,0xeb,0x7e,0xe3,0x44,0x7e,0xe2,0xed,0x7d,0xd1,0x0c,0xe0,0xb2,0x7d,0xcf, + 0x86,0x65,0x93,0x7d,0x14,0x00,0xe0,0xb6,0x7d,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4, + 
0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x80,0x00, + 0x10,0xff,0xf0,0x91,0xa3,0x81,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x82,0x00, + 0x10,0xff,0xf0,0x91,0xa3,0x83,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3, + 0x84,0x00,0x10,0xff,0xf0,0x91,0xa3,0x85,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3, + 0x86,0x00,0x10,0xff,0xf0,0x91,0xa3,0x87,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10, + 0xff,0xf0,0x91,0xa3,0x88,0x00,0x10,0xff,0xf0,0x91,0xa3,0x89,0x00,0x10,0x09,0x10, + 0xff,0xf0,0x91,0xa3,0x8a,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8b,0x00,0xd1,0x12,0x10, + 0x09,0x10,0xff,0xf0,0x91,0xa3,0x8c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8d,0x00,0x10, + 0x09,0x10,0xff,0xf0,0x91,0xa3,0x8e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8f,0x00,0xd3, + 0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x90,0x00,0x10,0xff, + 0xf0,0x91,0xa3,0x91,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x92,0x00,0x10,0xff, + 0xf0,0x91,0xa3,0x93,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x94,0x00, + 0x10,0xff,0xf0,0x91,0xa3,0x95,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x96,0x00, + 0x10,0xff,0xf0,0x91,0xa3,0x97,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0, + 0x91,0xa3,0x98,0x00,0x10,0xff,0xf0,0x91,0xa3,0x99,0x00,0x10,0x09,0x10,0xff,0xf0, + 0x91,0xa3,0x9a,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9b,0x00,0xd1,0x12,0x10,0x09,0x10, + 0xff,0xf0,0x91,0xa3,0x9c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9d,0x00,0x10,0x09,0x10, + 0xff,0xf0,0x91,0xa3,0x9e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9f,0x00,0xd1,0x11,0xe0, + 0x12,0x81,0xcf,0x86,0xe5,0x09,0x81,0xe4,0xd2,0x80,0xcf,0x06,0x00,0x00,0xe0,0xdb, + 0x82,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xd4,0x09,0xe3,0x10,0x81,0xcf,0x06, + 0x0c,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xe2,0x3b,0x82,0xe1,0x16,0x82,0xd0,0x06, + 0xcf,0x06,0x00,0x00,0xcf,0x86,0xa5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1, + 0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa1, + 0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa3, + 0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa4,0x00,0x14,0xff,0xf0,0x96, + 0xb9,0xa5,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa6,0x00,0x14,0xff,0xf0,0x96, + 0xb9,0xa7,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa8,0x00, + 0x14,0xff,0xf0,0x96,0xb9,0xa9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xaa,0x00, + 0x14,0xff,0xf0,0x96,0xb9,0xab,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9, + 0xac,0x00,0x14,0xff,0xf0,0x96,0xb9,0xad,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9, + 0xae,0x00,0x14,0xff,0xf0,0x96,0xb9,0xaf,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10, + 0x09,0x14,0xff,0xf0,0x96,0xb9,0xb0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb1,0x00,0x10, + 0x09,0x14,0xff,0xf0,0x96,0xb9,0xb2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb3,0x00,0xd1, + 0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb4,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb5, + 0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb6,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb7, + 0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb8,0x00,0x14,0xff, + 0xf0,0x96,0xb9,0xb9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xba,0x00,0x14,0xff, + 0xf0,0x96,0xb9,0xbb,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbc,0x00, + 0x14,0xff,0xf0,0x96,0xb9,0xbd,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbe,0x00, + 0x14,0xff,0xf0,0x96,0xb9,0xbf,0x00,0x14,0x00,0xd2,0x14,0xe1,0x25,0x82,0xe0,0x1c, + 0x82,0xcf,0x86,0xe5,0xdd,0x81,0xe4,0x9a,0x81,0xcf,0x06,0x12,0x00,0xd1,0x0b,0xe0, + 0x51,0x83,0xcf,0x86,0xcf,0x06,0x00,0x00,0xe0,0x95,0x8b,0xcf,0x86,0xd5,0x22,0xe4, + 
0xd0,0x88,0xe3,0x93,0x88,0xe2,0x38,0x88,0xe1,0x31,0x88,0xe0,0x2a,0x88,0xcf,0x86, + 0xe5,0xfb,0x87,0xe4,0xe2,0x87,0x93,0x07,0x62,0xd1,0x87,0x12,0xe6,0x12,0xe6,0xe4, + 0x36,0x89,0xe3,0x2f,0x89,0xd2,0x09,0xe1,0xb8,0x88,0xcf,0x06,0x10,0x00,0xe1,0x1f, + 0x89,0xe0,0xec,0x88,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1, + 0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa3, + 0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa5, + 0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa6,0x00,0x12,0xff,0xf0,0x9e, + 0xa4,0xa7,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa8,0x00,0x12,0xff,0xf0,0x9e, + 0xa4,0xa9,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xaa,0x00, + 0x12,0xff,0xf0,0x9e,0xa4,0xab,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xac,0x00, + 0x12,0xff,0xf0,0x9e,0xa4,0xad,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4, + 0xae,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xaf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4, + 0xb0,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb1,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10, + 0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb3,0x00,0x10, + 0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb5,0x00,0xd1, + 0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb6,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb7, + 0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb8,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb9, + 0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xba,0x00,0x12,0xff, + 0xf0,0x9e,0xa4,0xbb,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbc,0x00,0x12,0xff, + 0xf0,0x9e,0xa4,0xbd,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbe,0x00, + 0x12,0xff,0xf0,0x9e,0xa4,0xbf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa5,0x80,0x00, + 0x12,0xff,0xf0,0x9e,0xa5,0x81,0x00,0x94,0x1e,0x93,0x1a,0x92,0x16,0x91,0x12,0x10, + 0x09,0x12,0xff,0xf0,0x9e,0xa5,0x82,0x00,0x12,0xff,0xf0,0x9e,0xa5,0x83,0x00,0x12, + 0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + /* nfdi_c0100 */ + 0x57,0x04,0x01,0x00,0xc6,0xe5,0xac,0x13,0xe4,0x41,0x0c,0xe3,0x7a,0x07,0xe2,0xf3, + 0x01,0xc1,0xd0,0x1f,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x15,0x53,0x04,0x01,0x00, + 0x52,0x04,0x01,0x00,0x91,0x09,0x10,0x04,0x01,0x00,0x01,0xff,0x00,0x01,0x00,0x01, + 0x00,0xcf,0x86,0xd5,0xe4,0xd4,0x7c,0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, + 0xff,0x41,0xcc,0x80,0x00,0x01,0xff,0x41,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x41, + 0xcc,0x82,0x00,0x01,0xff,0x41,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x41, + 0xcc,0x88,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x43, + 0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0x80,0x00,0x01, + 0xff,0x45,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x82,0x00,0x01,0xff,0x45, + 0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x80,0x00,0x01,0xff,0x49, + 0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0x82,0x00,0x01,0xff,0x49,0xcc,0x88, + 0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x4e,0xcc,0x83, + 0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x80,0x00,0x01,0xff,0x4f,0xcc,0x81,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x82,0x00,0x01,0xff,0x4f,0xcc,0x83,0x00,0x10, + 0x08,0x01,0xff,0x4f,0xcc,0x88,0x00,0x01,0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01, + 0x00,0x01,0xff,0x55,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x81,0x00,0x01, + 
0xff,0x55,0xcc,0x82,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x88,0x00,0x01, + 0xff,0x59,0xcc,0x81,0x00,0x01,0x00,0xd4,0x7c,0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x61,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x81,0x00,0x10,0x08,0x01, + 0xff,0x61,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc,0x83,0x00,0xd1,0x10,0x10,0x08,0x01, + 0xff,0x61,0xcc,0x88,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x01, 0xff,0x63,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x80, 0x00,0x01,0xff,0x65,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x82,0x00,0x01, 0xff,0x65,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x80,0x00,0x01, 0xff,0x69,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0x82,0x00,0x01,0xff,0x69, - 0xcc,0x88,0x00,0xd3,0x3b,0xd2,0x1f,0xd1,0x0f,0x10,0x07,0x01,0xff,0xc3,0xb0,0x00, - 0x01,0xff,0x6e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x80,0x00,0x01,0xff, - 0x6f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x82,0x00,0x01,0xff, - 0x6f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00,0x01,0x00,0xd2,0x1f, - 0xd1,0x0f,0x10,0x07,0x01,0xff,0xc3,0xb8,0x00,0x01,0xff,0x75,0xcc,0x80,0x00,0x10, - 0x08,0x01,0xff,0x75,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x82,0x00,0xd1,0x10,0x10, - 0x08,0x01,0xff,0x75,0xcc,0x88,0x00,0x01,0xff,0x79,0xcc,0x81,0x00,0x10,0x07,0x01, - 0xff,0xc3,0xbe,0x00,0x01,0xff,0x73,0x73,0x00,0xe1,0xd4,0x03,0xe0,0xeb,0x01,0xcf, - 0x86,0xd5,0xfb,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61, - 0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x86, - 0x00,0x01,0xff,0x61,0xcc,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa8, - 0x00,0x01,0xff,0x61,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x63,0xcc,0x81,0x00,0x01, - 0xff,0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x63,0xcc,0x82, - 0x00,0x01,0xff,0x63,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x63,0xcc,0x87,0x00,0x01, - 0xff,0x63,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x63,0xcc,0x8c,0x00,0x01, - 0xff,0x63,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x8c,0x00,0x01,0xff,0x64, - 0xcc,0x8c,0x00,0xd3,0x3b,0xd2,0x1b,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc4,0x91,0x00, - 0x01,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x84,0x00,0x01,0xff,0x65,0xcc,0x84,0x00, - 0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0x86,0x00, - 0x10,0x08,0x01,0xff,0x65,0xcc,0x87,0x00,0x01,0xff,0x65,0xcc,0x87,0x00,0xd2,0x20, - 0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xa8,0x00,0x01,0xff,0x65,0xcc,0xa8,0x00, - 0x10,0x08,0x01,0xff,0x65,0xcc,0x8c,0x00,0x01,0xff,0x65,0xcc,0x8c,0x00,0xd1,0x10, - 0x10,0x08,0x01,0xff,0x67,0xcc,0x82,0x00,0x01,0xff,0x67,0xcc,0x82,0x00,0x10,0x08, - 0x01,0xff,0x67,0xcc,0x86,0x00,0x01,0xff,0x67,0xcc,0x86,0x00,0xd4,0x7b,0xd3,0x3b, - 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x67,0xcc,0x87,0x00,0x01,0xff,0x67,0xcc, - 0x87,0x00,0x10,0x08,0x01,0xff,0x67,0xcc,0xa7,0x00,0x01,0xff,0x67,0xcc,0xa7,0x00, - 0xd1,0x10,0x10,0x08,0x01,0xff,0x68,0xcc,0x82,0x00,0x01,0xff,0x68,0xcc,0x82,0x00, - 0x10,0x07,0x01,0xff,0xc4,0xa7,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, - 0xff,0x69,0xcc,0x83,0x00,0x01,0xff,0x69,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x69, - 0xcc,0x84,0x00,0x01,0xff,0x69,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69, - 0xcc,0x86,0x00,0x01,0xff,0x69,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x69,0xcc,0xa8, - 0x00,0x01,0xff,0x69,0xcc,0xa8,0x00,0xd3,0x37,0xd2,0x17,0xd1,0x0c,0x10,0x08,0x01, - 0xff,0x69,0xcc,0x87,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc4,0xb3,0x00,0x01,0x00, - 
0xd1,0x10,0x10,0x08,0x01,0xff,0x6a,0xcc,0x82,0x00,0x01,0xff,0x6a,0xcc,0x82,0x00, - 0x10,0x08,0x01,0xff,0x6b,0xcc,0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7,0x00,0xd2,0x1c, - 0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x6c,0xcc,0x81,0x00,0x10,0x08,0x01,0xff, - 0x6c,0xcc,0x81,0x00,0x01,0xff,0x6c,0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x6c,0xcc,0xa7,0x00,0x01,0xff,0x6c,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc, - 0x8c,0x00,0x01,0xff,0xc5,0x80,0x00,0xcf,0x86,0xd5,0xed,0xd4,0x72,0xd3,0x37,0xd2, - 0x17,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc5,0x82,0x00,0x10,0x04,0x01,0x00, - 0x01,0xff,0x6e,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x81,0x00, - 0x01,0xff,0x6e,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x01,0xff, - 0x6e,0xcc,0x8c,0x00,0xd2,0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x8c,0x00, - 0x01,0xff,0xca,0xbc,0x6e,0x00,0x10,0x07,0x01,0xff,0xc5,0x8b,0x00,0x01,0x00,0xd1, - 0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0x84,0x00,0x10, - 0x08,0x01,0xff,0x6f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc,0x86,0x00,0xd3,0x3b,0xd2, - 0x1b,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01,0xff,0x6f,0xcc,0x8b, - 0x00,0x10,0x07,0x01,0xff,0xc5,0x93,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x72,0xcc,0x81,0x00,0x01,0xff,0x72,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x72,0xcc, - 0xa7,0x00,0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x72,0xcc,0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x73,0xcc, - 0x81,0x00,0x01,0xff,0x73,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x73,0xcc, - 0x82,0x00,0x01,0xff,0x73,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x73,0xcc,0xa7,0x00, - 0x01,0xff,0x73,0xcc,0xa7,0x00,0xd4,0x7b,0xd3,0x3b,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x01,0xff,0x73,0xcc,0x8c,0x00,0x01,0xff,0x73,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff, - 0x74,0xcc,0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x74,0xcc,0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c,0x00,0x10,0x07,0x01,0xff,0xc5,0xa7, - 0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x83,0x00,0x01, - 0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x84,0x00,0x01,0xff,0x75, - 0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x86,0x00,0x01,0xff,0x75, - 0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x8a,0x00,0x01,0xff,0x75,0xcc,0x8a, - 0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x8b,0x00,0x01, - 0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa8,0x00,0x01,0xff,0x75, - 0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x82,0x00,0x01,0xff,0x77, - 0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x82,0x00,0x01,0xff,0x79,0xcc,0x82, - 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc,0x88,0x00,0x01,0xff,0x7a, - 0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x81,0x00,0x01,0xff,0x7a,0xcc,0x87, - 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x87,0x00,0x01,0xff,0x7a,0xcc,0x8c, - 0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00,0x01,0xff,0x73,0x00,0xe0,0x65,0x01, - 0xcf,0x86,0xd5,0xb4,0xd4,0x5a,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x01,0x00, - 0x01,0xff,0xc9,0x93,0x00,0x10,0x07,0x01,0xff,0xc6,0x83,0x00,0x01,0x00,0xd1,0x0b, - 0x10,0x07,0x01,0xff,0xc6,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc9,0x94,0x00, - 0x01,0xff,0xc6,0x88,0x00,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc9, - 0x96,0x00,0x10,0x07,0x01,0xff,0xc9,0x97,0x00,0x01,0xff,0xc6,0x8c,0x00,0x51,0x04, - 0x01,0x00,0x10,0x07,0x01,0xff,0xc7,0x9d,0x00,0x01,0xff,0xc9,0x99,0x00,0xd3,0x32, - 
0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xc9,0x9b,0x00,0x01,0xff,0xc6,0x92,0x00, - 0x10,0x04,0x01,0x00,0x01,0xff,0xc9,0xa0,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc9, - 0xa3,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xc9,0xa9,0x00,0x01,0xff,0xc9,0xa8,0x00, - 0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0x99,0x00,0x01,0x00,0x01,0x00,0xd1, - 0x0e,0x10,0x07,0x01,0xff,0xc9,0xaf,0x00,0x01,0xff,0xc9,0xb2,0x00,0x10,0x04,0x01, - 0x00,0x01,0xff,0xc9,0xb5,0x00,0xd4,0x5d,0xd3,0x34,0xd2,0x1b,0xd1,0x10,0x10,0x08, - 0x01,0xff,0x6f,0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x10,0x07,0x01,0xff, - 0xc6,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc6,0xa5,0x00,0x01,0x00, - 0x10,0x07,0x01,0xff,0xca,0x80,0x00,0x01,0xff,0xc6,0xa8,0x00,0xd2,0x0f,0x91,0x0b, - 0x10,0x04,0x01,0x00,0x01,0xff,0xca,0x83,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01, - 0xff,0xc6,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xca,0x88,0x00,0x01,0xff,0x75, - 0xcc,0x9b,0x00,0xd3,0x33,0xd2,0x1d,0xd1,0x0f,0x10,0x08,0x01,0xff,0x75,0xcc,0x9b, - 0x00,0x01,0xff,0xca,0x8a,0x00,0x10,0x07,0x01,0xff,0xca,0x8b,0x00,0x01,0xff,0xc6, - 0xb4,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x01,0xff,0xc6,0xb6,0x00,0x10,0x04,0x01, - 0x00,0x01,0xff,0xca,0x92,0x00,0xd2,0x0f,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0xb9, - 0x00,0x01,0x00,0x01,0x00,0x91,0x0b,0x10,0x07,0x01,0xff,0xc6,0xbd,0x00,0x01,0x00, - 0x01,0x00,0xcf,0x86,0xd5,0xd4,0xd4,0x44,0xd3,0x16,0x52,0x04,0x01,0x00,0x51,0x07, - 0x01,0xff,0xc7,0x86,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xc7,0x89,0x00,0xd2,0x12, - 0x91,0x0b,0x10,0x07,0x01,0xff,0xc7,0x89,0x00,0x01,0x00,0x01,0xff,0xc7,0x8c,0x00, - 0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x61,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff, - 0x61,0xcc,0x8c,0x00,0x01,0xff,0x69,0xcc,0x8c,0x00,0xd3,0x46,0xd2,0x20,0xd1,0x10, - 0x10,0x08,0x01,0xff,0x69,0xcc,0x8c,0x00,0x01,0xff,0x6f,0xcc,0x8c,0x00,0x10,0x08, - 0x01,0xff,0x6f,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x8c,0x00,0xd1,0x12,0x10,0x08, - 0x01,0xff,0x75,0xcc,0x8c,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x10,0x0a, - 0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,0x00, - 0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff, - 0x75,0xcc,0x88,0xcc,0x8c,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x8c,0x00, - 0x01,0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0x75,0xcc, - 0x88,0xcc,0x80,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00, - 0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00,0xd4,0x87,0xd3,0x41,0xd2,0x26,0xd1,0x14, - 0x10,0x0a,0x01,0xff,0x61,0xcc,0x87,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x87,0xcc, - 0x84,0x00,0x10,0x09,0x01,0xff,0xc3,0xa6,0xcc,0x84,0x00,0x01,0xff,0xc3,0xa6,0xcc, - 0x84,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xc7,0xa5,0x00,0x01,0x00,0x10,0x08,0x01, - 0xff,0x67,0xcc,0x8c,0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20,0xd1,0x10,0x10, - 0x08,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x10,0x08,0x01, - 0xff,0x6f,0xcc,0xa8,0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14,0x10,0x0a,0x01, - 0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x10, - 0x09,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0xd3, - 0x38,0xd2,0x1a,0xd1,0x0f,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00,0x01,0xff,0xc7, - 0xb3,0x00,0x10,0x07,0x01,0xff,0xc7,0xb3,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01, - 0xff,0x67,0xcc,0x81,0x00,0x01,0xff,0x67,0xcc,0x81,0x00,0x10,0x07,0x04,0xff,0xc6, - 0x95,0x00,0x04,0xff,0xc6,0xbf,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x6e, - 
0xcc,0x80,0x00,0x04,0xff,0x6e,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x8a, - 0xcc,0x81,0x00,0x01,0xff,0x61,0xcc,0x8a,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01, - 0xff,0xc3,0xa6,0xcc,0x81,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00,0x10,0x09,0x01, - 0xff,0xc3,0xb8,0xcc,0x81,0x00,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00,0xe2,0x31,0x02, - 0xe1,0xad,0x44,0xe0,0xc8,0x01,0xcf,0x86,0xd5,0xfb,0xd4,0x80,0xd3,0x40,0xd2,0x20, - 0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x8f,0x00,0x01,0xff,0x61,0xcc,0x8f,0x00, - 0x10,0x08,0x01,0xff,0x61,0xcc,0x91,0x00,0x01,0xff,0x61,0xcc,0x91,0x00,0xd1,0x10, - 0x10,0x08,0x01,0xff,0x65,0xcc,0x8f,0x00,0x01,0xff,0x65,0xcc,0x8f,0x00,0x10,0x08, - 0x01,0xff,0x65,0xcc,0x91,0x00,0x01,0xff,0x65,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10, - 0x10,0x08,0x01,0xff,0x69,0xcc,0x8f,0x00,0x01,0xff,0x69,0xcc,0x8f,0x00,0x10,0x08, - 0x01,0xff,0x69,0xcc,0x91,0x00,0x01,0xff,0x69,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08, - 0x01,0xff,0x6f,0xcc,0x8f,0x00,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff, - 0x6f,0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc,0x91,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10, - 0x10,0x08,0x01,0xff,0x72,0xcc,0x8f,0x00,0x01,0xff,0x72,0xcc,0x8f,0x00,0x10,0x08, - 0x01,0xff,0x72,0xcc,0x91,0x00,0x01,0xff,0x72,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08, - 0x01,0xff,0x75,0xcc,0x8f,0x00,0x01,0xff,0x75,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff, - 0x75,0xcc,0x91,0x00,0x01,0xff,0x75,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x04,0xff,0x73,0xcc,0xa6,0x00,0x04,0xff,0x73,0xcc,0xa6,0x00,0x10,0x08,0x04,0xff, - 0x74,0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc,0xa6,0x00,0xd1,0x0b,0x10,0x07,0x04,0xff, - 0xc8,0x9d,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x68,0xcc,0x8c,0x00,0x04,0xff,0x68, - 0xcc,0x8c,0x00,0xd4,0x79,0xd3,0x31,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff,0xc6, - 0x9e,0x00,0x07,0x00,0x10,0x07,0x04,0xff,0xc8,0xa3,0x00,0x04,0x00,0xd1,0x0b,0x10, - 0x07,0x04,0xff,0xc8,0xa5,0x00,0x04,0x00,0x10,0x08,0x04,0xff,0x61,0xcc,0x87,0x00, - 0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x65,0xcc, - 0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10,0x0a,0x04,0xff,0x6f,0xcc,0x88,0xcc, - 0x84,0x00,0x04,0xff,0x6f,0xcc,0x88,0xcc,0x84,0x00,0xd1,0x14,0x10,0x0a,0x04,0xff, - 0x6f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84,0x00,0x10,0x08, - 0x04,0xff,0x6f,0xcc,0x87,0x00,0x04,0xff,0x6f,0xcc,0x87,0x00,0xd3,0x27,0xe2,0x0b, - 0x43,0xd1,0x14,0x10,0x0a,0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00,0x04,0xff,0x6f, - 0xcc,0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x79,0xcc,0x84,0x00,0x04,0xff,0x79, - 0xcc,0x84,0x00,0xd2,0x13,0x51,0x04,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0xa5, - 0x00,0x08,0xff,0xc8,0xbc,0x00,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,0xff,0xc6,0x9a, - 0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0xa6,0x00,0x08,0x00,0xcf,0x86,0x95,0x5f,0x94, - 0x5b,0xd3,0x2f,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08,0xff,0xc9,0x82,0x00, - 0x10,0x04,0x09,0x00,0x09,0xff,0xc6,0x80,0x00,0xd1,0x0e,0x10,0x07,0x09,0xff,0xca, - 0x89,0x00,0x09,0xff,0xca,0x8c,0x00,0x10,0x07,0x09,0xff,0xc9,0x87,0x00,0x09,0x00, - 0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9,0x89,0x00,0x09,0x00,0x10,0x07,0x09, - 0xff,0xc9,0x8b,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07,0x09,0xff,0xc9,0x8d,0x00,0x09, - 0x00,0x10,0x07,0x09,0xff,0xc9,0x8f,0x00,0x09,0x00,0x01,0x00,0x01,0x00,0xd1,0x8b, - 0xd0,0x0c,0xcf,0x86,0xe5,0xfa,0x42,0x64,0xd9,0x42,0x01,0xe6,0xcf,0x86,0xd5,0x2a, - 0xe4,0x82,0x43,0xe3,0x69,0x43,0xd2,0x11,0xe1,0x48,0x43,0x10,0x07,0x01,0xff,0xcc, - 0x80,0x00,0x01,0xff,0xcc,0x81,0x00,0xe1,0x4f,0x43,0x10,0x09,0x01,0xff,0xcc,0x88, - 
0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0x00,0xd4,0x0f,0x93,0x0b,0x92,0x07,0x61,0x94, - 0x43,0x01,0xea,0x06,0xe6,0x06,0xe6,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x0a, - 0xff,0xcd,0xb1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xcd,0xb3,0x00,0x0a,0x00,0xd1, - 0x0b,0x10,0x07,0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x10,0x07,0x0a,0xff,0xcd,0xb7, - 0x00,0x0a,0x00,0xd2,0x07,0x61,0x80,0x43,0x00,0x00,0x51,0x04,0x09,0x00,0x10,0x06, - 0x01,0xff,0x3b,0x00,0x10,0xff,0xcf,0xb3,0x00,0xe0,0x31,0x01,0xcf,0x86,0xd5,0xd3, - 0xd4,0x5f,0xd3,0x21,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x01,0x00,0x01,0xff, - 0xc2,0xa8,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x81,0x00,0x01,0xff, - 0xc2,0xb7,0x00,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00, - 0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00, - 0x00,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x00,0x00,0x10, - 0x09,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0xd3, - 0x3c,0xd2,0x20,0xd1,0x12,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00, - 0x01,0xff,0xce,0xb1,0x00,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01,0xff,0xce,0xb3, - 0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xb4,0x00,0x01,0xff,0xce,0xb5,0x00,0x10, - 0x07,0x01,0xff,0xce,0xb6,0x00,0x01,0xff,0xce,0xb7,0x00,0xd2,0x1c,0xd1,0x0e,0x10, - 0x07,0x01,0xff,0xce,0xb8,0x00,0x01,0xff,0xce,0xb9,0x00,0x10,0x07,0x01,0xff,0xce, - 0xba,0x00,0x01,0xff,0xce,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xce,0xbc,0x00, - 0x01,0xff,0xce,0xbd,0x00,0x10,0x07,0x01,0xff,0xce,0xbe,0x00,0x01,0xff,0xce,0xbf, - 0x00,0xe4,0x6e,0x43,0xd3,0x35,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xcf,0x80, - 0x00,0x01,0xff,0xcf,0x81,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x83,0x00,0xd1, - 0x0e,0x10,0x07,0x01,0xff,0xcf,0x84,0x00,0x01,0xff,0xcf,0x85,0x00,0x10,0x07,0x01, - 0xff,0xcf,0x86,0x00,0x01,0xff,0xcf,0x87,0x00,0xe2,0x14,0x43,0xd1,0x0e,0x10,0x07, - 0x01,0xff,0xcf,0x88,0x00,0x01,0xff,0xcf,0x89,0x00,0x10,0x09,0x01,0xff,0xce,0xb9, - 0xcc,0x88,0x00,0x01,0xff,0xcf,0x85,0xcc,0x88,0x00,0xcf,0x86,0xd5,0x94,0xd4,0x3c, - 0xd3,0x13,0x92,0x0f,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0x83,0x00,0x01, - 0x00,0x01,0x00,0xd2,0x07,0x61,0x23,0x43,0x01,0x00,0xd1,0x12,0x10,0x09,0x01,0xff, - 0xce,0xbf,0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x10,0x09,0x01,0xff, - 0xcf,0x89,0xcc,0x81,0x00,0x0a,0xff,0xcf,0x97,0x00,0xd3,0x2c,0xd2,0x11,0xe1,0x2f, - 0x43,0x10,0x07,0x01,0xff,0xce,0xb2,0x00,0x01,0xff,0xce,0xb8,0x00,0xd1,0x10,0x10, - 0x09,0x01,0xff,0xcf,0x92,0xcc,0x88,0x00,0x01,0xff,0xcf,0x86,0x00,0x10,0x07,0x01, - 0xff,0xcf,0x80,0x00,0x04,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff,0xcf,0x99, - 0x00,0x06,0x00,0x10,0x07,0x01,0xff,0xcf,0x9b,0x00,0x04,0x00,0xd1,0x0b,0x10,0x07, - 0x01,0xff,0xcf,0x9d,0x00,0x04,0x00,0x10,0x07,0x01,0xff,0xcf,0x9f,0x00,0x04,0x00, - 0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xa1,0x00,0x04, - 0x00,0x10,0x07,0x01,0xff,0xcf,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff, - 0xcf,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xcf,0xa7,0x00,0x01,0x00,0xd2,0x16, - 0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xa9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xcf, - 0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xcf,0xad,0x00,0x01,0x00,0x10, - 0x07,0x01,0xff,0xcf,0xaf,0x00,0x01,0x00,0xd3,0x2b,0xd2,0x12,0x91,0x0e,0x10,0x07, - 0x01,0xff,0xce,0xba,0x00,0x01,0xff,0xcf,0x81,0x00,0x01,0x00,0xd1,0x0e,0x10,0x07, - 0x05,0xff,0xce,0xb8,0x00,0x05,0xff,0xce,0xb5,0x00,0x10,0x04,0x06,0x00,0x07,0xff, - 
0xcf,0xb8,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x04,0x07,0x00,0x07,0xff,0xcf,0xb2,0x00, - 0x10,0x07,0x07,0xff,0xcf,0xbb,0x00,0x07,0x00,0xd1,0x0b,0x10,0x04,0x08,0x00,0x08, - 0xff,0xcd,0xbb,0x00,0x10,0x07,0x08,0xff,0xcd,0xbc,0x00,0x08,0xff,0xcd,0xbd,0x00, - 0xe3,0xd6,0x46,0xe2,0x3d,0x05,0xe1,0x27,0x02,0xe0,0x66,0x01,0xcf,0x86,0xd5,0xf0, - 0xd4,0x7e,0xd3,0x40,0xd2,0x22,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,0xb5,0xcc,0x80, - 0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88,0x00,0x10,0x07,0x01,0xff,0xd1,0x92,0x00,0x01, - 0xff,0xd0,0xb3,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x94,0x00,0x01, - 0xff,0xd1,0x95,0x00,0x10,0x07,0x01,0xff,0xd1,0x96,0x00,0x01,0xff,0xd1,0x96,0xcc, - 0x88,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x98,0x00,0x01,0xff,0xd1, - 0x99,0x00,0x10,0x07,0x01,0xff,0xd1,0x9a,0x00,0x01,0xff,0xd1,0x9b,0x00,0xd1,0x12, - 0x10,0x09,0x01,0xff,0xd0,0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8,0xcc,0x80,0x00, - 0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x86,0x00,0x01,0xff,0xd1,0x9f,0x00,0xd3,0x38, - 0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xb0,0x00,0x01,0xff,0xd0,0xb1,0x00, - 0x10,0x07,0x01,0xff,0xd0,0xb2,0x00,0x01,0xff,0xd0,0xb3,0x00,0xd1,0x0e,0x10,0x07, - 0x01,0xff,0xd0,0xb4,0x00,0x01,0xff,0xd0,0xb5,0x00,0x10,0x07,0x01,0xff,0xd0,0xb6, - 0x00,0x01,0xff,0xd0,0xb7,0x00,0xd2,0x1e,0xd1,0x10,0x10,0x07,0x01,0xff,0xd0,0xb8, - 0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x10,0x07,0x01,0xff,0xd0,0xba,0x00,0x01, - 0xff,0xd0,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd0,0xbc,0x00,0x01,0xff,0xd0, - 0xbd,0x00,0x10,0x07,0x01,0xff,0xd0,0xbe,0x00,0x01,0xff,0xd0,0xbf,0x00,0xe4,0x0e, - 0x42,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x80,0x00,0x01,0xff, - 0xd1,0x81,0x00,0x10,0x07,0x01,0xff,0xd1,0x82,0x00,0x01,0xff,0xd1,0x83,0x00,0xd1, - 0x0e,0x10,0x07,0x01,0xff,0xd1,0x84,0x00,0x01,0xff,0xd1,0x85,0x00,0x10,0x07,0x01, - 0xff,0xd1,0x86,0x00,0x01,0xff,0xd1,0x87,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01, - 0xff,0xd1,0x88,0x00,0x01,0xff,0xd1,0x89,0x00,0x10,0x07,0x01,0xff,0xd1,0x8a,0x00, - 0x01,0xff,0xd1,0x8b,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd1,0x8c,0x00,0x01,0xff, - 0xd1,0x8d,0x00,0x10,0x07,0x01,0xff,0xd1,0x8e,0x00,0x01,0xff,0xd1,0x8f,0x00,0xcf, - 0x86,0xd5,0x07,0x64,0xb8,0x41,0x01,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b, - 0x10,0x07,0x01,0xff,0xd1,0xa1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xa3,0x00, - 0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa5,0x00,0x01,0x00,0x10,0x07,0x01, - 0xff,0xd1,0xa7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xa9, - 0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07, - 0x01,0xff,0xd1,0xad,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xaf,0x00,0x01,0x00, - 0xd3,0x33,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb1,0x00,0x01,0x00,0x10, - 0x07,0x01,0xff,0xd1,0xb3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb5, - 0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb5,0xcc,0x8f,0x00,0x01,0xff,0xd1,0xb5, - 0xcc,0x8f,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1,0xb9,0x00,0x01,0x00, - 0x10,0x07,0x01,0xff,0xd1,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd1, - 0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd1,0xbf,0x00,0x01,0x00,0xe0,0x41,0x01, - 0xcf,0x86,0xd5,0x8e,0xd4,0x36,0xd3,0x11,0xe2,0x7a,0x41,0xe1,0x71,0x41,0x10,0x07, - 0x01,0xff,0xd2,0x81,0x00,0x01,0x00,0xd2,0x0f,0x51,0x04,0x04,0x00,0x10,0x07,0x06, - 0xff,0xd2,0x8b,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x04,0xff,0xd2,0x8d,0x00,0x04, - 0x00,0x10,0x07,0x04,0xff,0xd2,0x8f,0x00,0x04,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b, - 
0x10,0x07,0x01,0xff,0xd2,0x91,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x93,0x00, - 0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x95,0x00,0x01,0x00,0x10,0x07,0x01, - 0xff,0xd2,0x97,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0x99, - 0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9b,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07, - 0x01,0xff,0xd2,0x9d,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0x9f,0x00,0x01,0x00, - 0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xa1,0x00,0x01, - 0x00,0x10,0x07,0x01,0xff,0xd2,0xa3,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff, - 0xd2,0xa5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xa7,0x00,0x01,0x00,0xd2,0x16, - 0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xa9,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2, - 0xab,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xad,0x00,0x01,0x00,0x10, - 0x07,0x01,0xff,0xd2,0xaf,0x00,0x01,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07, - 0x01,0xff,0xd2,0xb1,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xb3,0x00,0x01,0x00, - 0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xb5,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2, - 0xb7,0x00,0x01,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd2,0xb9,0x00,0x01, - 0x00,0x10,0x07,0x01,0xff,0xd2,0xbb,0x00,0x01,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff, - 0xd2,0xbd,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xd2,0xbf,0x00,0x01,0x00,0xcf,0x86, - 0xd5,0xdc,0xd4,0x5a,0xd3,0x36,0xd2,0x20,0xd1,0x10,0x10,0x07,0x01,0xff,0xd3,0x8f, - 0x00,0x01,0xff,0xd0,0xb6,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0xb6,0xcc,0x86, - 0x00,0x01,0xff,0xd3,0x84,0x00,0xd1,0x0b,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x86, - 0x00,0x10,0x04,0x06,0x00,0x01,0xff,0xd3,0x88,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x04, - 0x01,0x00,0x06,0xff,0xd3,0x8a,0x00,0x10,0x04,0x06,0x00,0x01,0xff,0xd3,0x8c,0x00, - 0xe1,0x52,0x40,0x10,0x04,0x01,0x00,0x06,0xff,0xd3,0x8e,0x00,0xd3,0x41,0xd2,0x24, - 0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb0,0xcc, - 0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb0,0xcc, - 0x88,0x00,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0x95,0x00,0x01,0x00,0x10,0x09,0x01, - 0xff,0xd0,0xb5,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00,0xd2,0x1d,0xd1, - 0x0b,0x10,0x07,0x01,0xff,0xd3,0x99,0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0x99, - 0xcc,0x88,0x00,0x01,0xff,0xd3,0x99,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff, - 0xd0,0xb6,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x10,0x09,0x01,0xff, - 0xd0,0xb7,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0xd4,0x82,0xd3,0x41, - 0xd2,0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa1,0x00,0x01,0x00,0x10,0x09,0x01, - 0xff,0xd0,0xb8,0xcc,0x84,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00,0xd1,0x12,0x10, - 0x09,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00,0x10, - 0x09,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00,0xd2, - 0x1d,0xd1,0x0b,0x10,0x07,0x01,0xff,0xd3,0xa9,0x00,0x01,0x00,0x10,0x09,0x01,0xff, - 0xd3,0xa9,0xcc,0x88,0x00,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09, - 0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x10,0x09, - 0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0xd3,0x41, - 0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x88,0x00,0x01,0xff,0xd1, - 0x83,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00,0x01,0xff,0xd1, - 0x83,0xcc,0x8b,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x87,0xcc,0x88,0x00,0x01, - 0xff,0xd1,0x87,0xcc,0x88,0x00,0x10,0x07,0x08,0xff,0xd3,0xb7,0x00,0x08,0x00,0xd2, - 
0x1d,0xd1,0x12,0x10,0x09,0x01,0xff,0xd1,0x8b,0xcc,0x88,0x00,0x01,0xff,0xd1,0x8b, - 0xcc,0x88,0x00,0x10,0x07,0x09,0xff,0xd3,0xbb,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07, - 0x09,0xff,0xd3,0xbd,0x00,0x09,0x00,0x10,0x07,0x09,0xff,0xd3,0xbf,0x00,0x09,0x00, - 0xe1,0x26,0x02,0xe0,0x78,0x01,0xcf,0x86,0xd5,0xb0,0xd4,0x58,0xd3,0x2c,0xd2,0x16, - 0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x81,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4, - 0x83,0x00,0x06,0x00,0xd1,0x0b,0x10,0x07,0x06,0xff,0xd4,0x85,0x00,0x06,0x00,0x10, - 0x07,0x06,0xff,0xd4,0x87,0x00,0x06,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x06,0xff, - 0xd4,0x89,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x8b,0x00,0x06,0x00,0xd1,0x0b, - 0x10,0x07,0x06,0xff,0xd4,0x8d,0x00,0x06,0x00,0x10,0x07,0x06,0xff,0xd4,0x8f,0x00, - 0x06,0x00,0xd3,0x2c,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x09,0xff,0xd4,0x91,0x00,0x09, - 0x00,0x10,0x07,0x09,0xff,0xd4,0x93,0x00,0x09,0x00,0xd1,0x0b,0x10,0x07,0x0a,0xff, - 0xd4,0x95,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0x97,0x00,0x0a,0x00,0xd2,0x16, - 0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x99,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4, - 0x9b,0x00,0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0a,0xff,0xd4,0x9d,0x00,0x0a,0x00,0x10, - 0x07,0x0a,0xff,0xd4,0x9f,0x00,0x0a,0x00,0xd4,0x58,0xd3,0x2c,0xd2,0x16,0xd1,0x0b, - 0x10,0x07,0x0a,0xff,0xd4,0xa1,0x00,0x0a,0x00,0x10,0x07,0x0a,0xff,0xd4,0xa3,0x00, - 0x0a,0x00,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xd4,0xa5,0x00,0x0b,0x00,0x10,0x07,0x0c, - 0xff,0xd4,0xa7,0x00,0x0c,0x00,0xd2,0x16,0xd1,0x0b,0x10,0x07,0x10,0xff,0xd4,0xa9, - 0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xab,0x00,0x10,0x00,0xd1,0x0b,0x10,0x07, - 0x10,0xff,0xd4,0xad,0x00,0x10,0x00,0x10,0x07,0x10,0xff,0xd4,0xaf,0x00,0x10,0x00, - 0xd3,0x35,0xd2,0x19,0xd1,0x0b,0x10,0x04,0x00,0x00,0x01,0xff,0xd5,0xa1,0x00,0x10, - 0x07,0x01,0xff,0xd5,0xa2,0x00,0x01,0xff,0xd5,0xa3,0x00,0xd1,0x0e,0x10,0x07,0x01, - 0xff,0xd5,0xa4,0x00,0x01,0xff,0xd5,0xa5,0x00,0x10,0x07,0x01,0xff,0xd5,0xa6,0x00, - 0x01,0xff,0xd5,0xa7,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xa8,0x00, - 0x01,0xff,0xd5,0xa9,0x00,0x10,0x07,0x01,0xff,0xd5,0xaa,0x00,0x01,0xff,0xd5,0xab, - 0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xac,0x00,0x01,0xff,0xd5,0xad,0x00,0x10, - 0x07,0x01,0xff,0xd5,0xae,0x00,0x01,0xff,0xd5,0xaf,0x00,0xcf,0x86,0xe5,0xf1,0x3e, - 0xd4,0x70,0xd3,0x38,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xb0,0x00,0x01, - 0xff,0xd5,0xb1,0x00,0x10,0x07,0x01,0xff,0xd5,0xb2,0x00,0x01,0xff,0xd5,0xb3,0x00, - 0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xb4,0x00,0x01,0xff,0xd5,0xb5,0x00,0x10,0x07, - 0x01,0xff,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb7,0x00,0xd2,0x1c,0xd1,0x0e,0x10,0x07, - 0x01,0xff,0xd5,0xb8,0x00,0x01,0xff,0xd5,0xb9,0x00,0x10,0x07,0x01,0xff,0xd5,0xba, - 0x00,0x01,0xff,0xd5,0xbb,0x00,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd5,0xbc,0x00,0x01, - 0xff,0xd5,0xbd,0x00,0x10,0x07,0x01,0xff,0xd5,0xbe,0x00,0x01,0xff,0xd5,0xbf,0x00, - 0xe3,0x70,0x3e,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x01,0xff,0xd6,0x80,0x00,0x01,0xff, - 0xd6,0x81,0x00,0x10,0x07,0x01,0xff,0xd6,0x82,0x00,0x01,0xff,0xd6,0x83,0x00,0xd1, - 0x0e,0x10,0x07,0x01,0xff,0xd6,0x84,0x00,0x01,0xff,0xd6,0x85,0x00,0x10,0x07,0x01, - 0xff,0xd6,0x86,0x00,0x00,0x00,0xe0,0x18,0x3f,0xcf,0x86,0xe5,0xa9,0x3e,0xe4,0x80, - 0x3e,0xe3,0x5f,0x3e,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00, - 0x01,0xff,0xd5,0xa5,0xd6,0x82,0x00,0xe4,0x3e,0x25,0xe3,0xc4,0x1a,0xe2,0xf8,0x80, - 0xe1,0xc0,0x13,0xd0,0x1e,0xcf,0x86,0xc5,0xe4,0xf0,0x4a,0xe3,0x3b,0x46,0xe2,0xd1, - 0x43,0xe1,0x04,0x43,0xe0,0xc9,0x42,0xcf,0x86,0xe5,0x8e,0x42,0x64,0x71,0x42,0x0b, - 
0x00,0xcf,0x86,0xe5,0xfa,0x01,0xe4,0xd5,0x55,0xe3,0x76,0x01,0xe2,0x76,0x53,0xd1, - 0x0c,0xe0,0xd7,0x52,0xcf,0x86,0x65,0x75,0x52,0x04,0x00,0xe0,0x0d,0x01,0xcf,0x86, - 0xd5,0x0a,0xe4,0xf8,0x52,0x63,0xe7,0x52,0x0a,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20, - 0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0x80,0x00,0x01,0xff,0xe2,0xb4,0x81,0x00, - 0x10,0x08,0x01,0xff,0xe2,0xb4,0x82,0x00,0x01,0xff,0xe2,0xb4,0x83,0x00,0xd1,0x10, - 0x10,0x08,0x01,0xff,0xe2,0xb4,0x84,0x00,0x01,0xff,0xe2,0xb4,0x85,0x00,0x10,0x08, - 0x01,0xff,0xe2,0xb4,0x86,0x00,0x01,0xff,0xe2,0xb4,0x87,0x00,0xd2,0x20,0xd1,0x10, - 0x10,0x08,0x01,0xff,0xe2,0xb4,0x88,0x00,0x01,0xff,0xe2,0xb4,0x89,0x00,0x10,0x08, - 0x01,0xff,0xe2,0xb4,0x8a,0x00,0x01,0xff,0xe2,0xb4,0x8b,0x00,0xd1,0x10,0x10,0x08, - 0x01,0xff,0xe2,0xb4,0x8c,0x00,0x01,0xff,0xe2,0xb4,0x8d,0x00,0x10,0x08,0x01,0xff, - 0xe2,0xb4,0x8e,0x00,0x01,0xff,0xe2,0xb4,0x8f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10, - 0x10,0x08,0x01,0xff,0xe2,0xb4,0x90,0x00,0x01,0xff,0xe2,0xb4,0x91,0x00,0x10,0x08, - 0x01,0xff,0xe2,0xb4,0x92,0x00,0x01,0xff,0xe2,0xb4,0x93,0x00,0xd1,0x10,0x10,0x08, - 0x01,0xff,0xe2,0xb4,0x94,0x00,0x01,0xff,0xe2,0xb4,0x95,0x00,0x10,0x08,0x01,0xff, - 0xe2,0xb4,0x96,0x00,0x01,0xff,0xe2,0xb4,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x01,0xff,0xe2,0xb4,0x98,0x00,0x01,0xff,0xe2,0xb4,0x99,0x00,0x10,0x08,0x01,0xff, - 0xe2,0xb4,0x9a,0x00,0x01,0xff,0xe2,0xb4,0x9b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, - 0xe2,0xb4,0x9c,0x00,0x01,0xff,0xe2,0xb4,0x9d,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4, - 0x9e,0x00,0x01,0xff,0xe2,0xb4,0x9f,0x00,0xcf,0x86,0xe5,0x2a,0x52,0x94,0x50,0xd3, - 0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa0,0x00,0x01,0xff,0xe2, - 0xb4,0xa1,0x00,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa2,0x00,0x01,0xff,0xe2,0xb4,0xa3, - 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0xb4,0xa4,0x00,0x01,0xff,0xe2,0xb4,0xa5, - 0x00,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xa7,0x00,0x52,0x04,0x00,0x00,0x91, - 0x0c,0x10,0x04,0x00,0x00,0x0d,0xff,0xe2,0xb4,0xad,0x00,0x00,0x00,0x01,0x00,0xd2, - 0x1b,0xe1,0xce,0x52,0xe0,0x7f,0x52,0xcf,0x86,0x95,0x0f,0x94,0x0b,0x93,0x07,0x62, - 0x64,0x52,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xd1,0x13,0xe0,0xa5,0x53,0xcf, - 0x86,0x95,0x0a,0xe4,0x7a,0x53,0x63,0x69,0x53,0x04,0x00,0x04,0x00,0xd0,0x0d,0xcf, - 0x86,0x95,0x07,0x64,0xf4,0x53,0x08,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00, - 0x54,0x04,0x04,0x00,0xd3,0x07,0x62,0x01,0x54,0x04,0x00,0xd2,0x20,0xd1,0x10,0x10, - 0x08,0x11,0xff,0xe1,0x8f,0xb0,0x00,0x11,0xff,0xe1,0x8f,0xb1,0x00,0x10,0x08,0x11, - 0xff,0xe1,0x8f,0xb2,0x00,0x11,0xff,0xe1,0x8f,0xb3,0x00,0x91,0x10,0x10,0x08,0x11, - 0xff,0xe1,0x8f,0xb4,0x00,0x11,0xff,0xe1,0x8f,0xb5,0x00,0x00,0x00,0xd4,0x1c,0xe3, - 0x92,0x56,0xe2,0xc9,0x55,0xe1,0x8c,0x55,0xe0,0x6d,0x55,0xcf,0x86,0x95,0x0a,0xe4, - 0x56,0x55,0x63,0x45,0x55,0x04,0x00,0x04,0x00,0xe3,0xd2,0x01,0xe2,0xdd,0x59,0xd1, - 0x0c,0xe0,0xfe,0x58,0xcf,0x86,0x65,0xd7,0x58,0x0a,0x00,0xe0,0x4e,0x59,0xcf,0x86, - 0xd5,0xc5,0xd4,0x45,0xd3,0x31,0xd2,0x1c,0xd1,0x0e,0x10,0x07,0x12,0xff,0xd0,0xb2, - 0x00,0x12,0xff,0xd0,0xb4,0x00,0x10,0x07,0x12,0xff,0xd0,0xbe,0x00,0x12,0xff,0xd1, - 0x81,0x00,0x51,0x07,0x12,0xff,0xd1,0x82,0x00,0x10,0x07,0x12,0xff,0xd1,0x8a,0x00, - 0x12,0xff,0xd1,0xa3,0x00,0x92,0x10,0x91,0x0c,0x10,0x08,0x12,0xff,0xea,0x99,0x8b, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14, - 0xff,0xe1,0x83,0x90,0x00,0x14,0xff,0xe1,0x83,0x91,0x00,0x10,0x08,0x14,0xff,0xe1, - 0x83,0x92,0x00,0x14,0xff,0xe1,0x83,0x93,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1, - 
0x83,0x94,0x00,0x14,0xff,0xe1,0x83,0x95,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0x96, - 0x00,0x14,0xff,0xe1,0x83,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1, - 0x83,0x98,0x00,0x14,0xff,0xe1,0x83,0x99,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0x9a, - 0x00,0x14,0xff,0xe1,0x83,0x9b,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0x9c, - 0x00,0x14,0xff,0xe1,0x83,0x9d,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0x9e,0x00,0x14, - 0xff,0xe1,0x83,0x9f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14, - 0xff,0xe1,0x83,0xa0,0x00,0x14,0xff,0xe1,0x83,0xa1,0x00,0x10,0x08,0x14,0xff,0xe1, - 0x83,0xa2,0x00,0x14,0xff,0xe1,0x83,0xa3,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1, - 0x83,0xa4,0x00,0x14,0xff,0xe1,0x83,0xa5,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xa6, - 0x00,0x14,0xff,0xe1,0x83,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1, - 0x83,0xa8,0x00,0x14,0xff,0xe1,0x83,0xa9,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xaa, - 0x00,0x14,0xff,0xe1,0x83,0xab,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0xac, - 0x00,0x14,0xff,0xe1,0x83,0xad,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xae,0x00,0x14, - 0xff,0xe1,0x83,0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1, - 0x83,0xb0,0x00,0x14,0xff,0xe1,0x83,0xb1,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xb2, - 0x00,0x14,0xff,0xe1,0x83,0xb3,0x00,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0xb4, - 0x00,0x14,0xff,0xe1,0x83,0xb5,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xb6,0x00,0x14, - 0xff,0xe1,0x83,0xb7,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x14,0xff,0xe1,0x83,0xb8, - 0x00,0x14,0xff,0xe1,0x83,0xb9,0x00,0x10,0x08,0x14,0xff,0xe1,0x83,0xba,0x00,0x00, - 0x00,0xd1,0x0c,0x10,0x04,0x00,0x00,0x14,0xff,0xe1,0x83,0xbd,0x00,0x10,0x08,0x14, - 0xff,0xe1,0x83,0xbe,0x00,0x14,0xff,0xe1,0x83,0xbf,0x00,0xe2,0x9d,0x08,0xe1,0x48, - 0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84,0xd3,0x40,0xd2,0x20,0xd1, - 0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa5,0x00,0x01,0xff,0x61,0xcc,0xa5,0x00,0x10, - 0x08,0x01,0xff,0x62,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc,0x87,0x00,0xd1,0x10,0x10, - 0x08,0x01,0xff,0x62,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc,0xa3,0x00,0x10,0x08,0x01, - 0xff,0x62,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00,0xd2,0x24,0xd1,0x14,0x10, - 0x0a,0x01,0xff,0x63,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff,0x63,0xcc,0xa7,0xcc,0x81, - 0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0x87,0x00,0x01,0xff,0x64,0xcc,0x87,0x00,0xd1, - 0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa3,0x00,0x01,0xff,0x64,0xcc,0xa3,0x00,0x10, - 0x08,0x01,0xff,0x64,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc,0xb1,0x00,0xd3,0x48,0xd2, - 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x64,0xcc,0xa7,0x00,0x01,0xff,0x64,0xcc,0xa7, - 0x00,0x10,0x08,0x01,0xff,0x64,0xcc,0xad,0x00,0x01,0xff,0x64,0xcc,0xad,0x00,0xd1, - 0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,0x65,0xcc,0x84, - 0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0x01,0xff,0x65, - 0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xad, - 0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0xb0,0x00,0x01, - 0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86, - 0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x66,0xcc,0x87, - 0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10, - 0x08,0x01,0xff,0x67,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc,0x84,0x00,0x10,0x08,0x01, - 0xff,0x68,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01, - 0xff,0x68,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x68, - 
0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, - 0xff,0x68,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x68, - 0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x69, - 0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a,0x01,0xff,0x69,0xcc,0x88, - 0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00,0xd3,0x40,0xd2,0x20,0xd1, - 0x10,0x10,0x08,0x01,0xff,0x6b,0xcc,0x81,0x00,0x01,0xff,0x6b,0xcc,0x81,0x00,0x10, - 0x08,0x01,0xff,0x6b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc,0xa3,0x00,0xd1,0x10,0x10, - 0x08,0x01,0xff,0x6b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc,0xb1,0x00,0x10,0x08,0x01, - 0xff,0x6c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00,0xd2,0x24,0xd1,0x14,0x10, - 0x0a,0x01,0xff,0x6c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,0x6c,0xcc,0xa3,0xcc,0x84, - 0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0xb1,0x00,0x01,0xff,0x6c,0xcc,0xb1,0x00,0xd1, - 0x10,0x10,0x08,0x01,0xff,0x6c,0xcc,0xad,0x00,0x01,0xff,0x6c,0xcc,0xad,0x00,0x10, - 0x08,0x01,0xff,0x6d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc,0x81,0x00,0xcf,0x86,0xe5, - 0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6d,0xcc, - 0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6d,0xcc,0xa3,0x00, - 0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x87,0x00, - 0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa3,0x00,0x01,0xff, - 0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0xb1,0x00, - 0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xad,0x00,0x01,0xff, - 0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00, - 0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x83,0xcc, - 0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3,0x48,0xd2,0x28,0xd1,0x14, - 0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x84,0xcc, - 0x80,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc, - 0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x70,0xcc,0x81,0x00,0x01,0xff, - 0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x70,0xcc,0x87,0x00,0x01,0xff,0x70,0xcc, - 0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x72,0xcc,0x87,0x00,0x01,0xff, - 0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xa3,0x00,0x01,0xff,0x72,0xcc, - 0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff, - 0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x72,0xcc,0xb1,0x00,0x01,0xff, - 0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x73,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x73,0xcc, - 0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x73,0xcc, - 0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87,0x00,0x10,0x0a,0x01,0xff, - 0x73,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c,0xcc,0x87,0x00,0xd2,0x24, - 0xd1,0x14,0x10,0x0a,0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc, - 0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0x87,0x00,0x01,0xff,0x74,0xcc, - 0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xa3,0x00,0x01,0xff,0x74,0xcc, - 0xa3,0x00,0x10,0x08,0x01,0xff,0x74,0xcc,0xb1,0x00,0x01,0xff,0x74,0xcc,0xb1,0x00, - 0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x74,0xcc,0xad,0x00,0x01,0xff, - 0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xa4,0x00,0x01,0xff,0x75,0xcc, - 0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0xb0,0x00,0x01,0xff,0x75,0xcc, - 
0xb0,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0xad,0x00,0x01,0xff,0x75,0xcc,0xad,0x00, - 0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x01,0xff, - 0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00, - 0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x76,0xcc, - 0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x76,0xcc,0xa3,0x00, - 0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x11,0x02,0xcf,0x86,0xd5,0xe2,0xd4,0x80,0xd3, - 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x80,0x00,0x01,0xff,0x77, - 0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x81,0x00,0x01,0xff,0x77,0xcc,0x81, - 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x88,0x00,0x01,0xff,0x77,0xcc,0x88, - 0x00,0x10,0x08,0x01,0xff,0x77,0xcc,0x87,0x00,0x01,0xff,0x77,0xcc,0x87,0x00,0xd2, - 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0xa3,0x00,0x01,0xff,0x77,0xcc,0xa3, - 0x00,0x10,0x08,0x01,0xff,0x78,0xcc,0x87,0x00,0x01,0xff,0x78,0xcc,0x87,0x00,0xd1, - 0x10,0x10,0x08,0x01,0xff,0x78,0xcc,0x88,0x00,0x01,0xff,0x78,0xcc,0x88,0x00,0x10, - 0x08,0x01,0xff,0x79,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc,0x87,0x00,0xd3,0x33,0xd2, - 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x82,0x00,0x01,0xff,0x7a,0xcc,0x82, - 0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0xa3,0x00,0x01,0xff,0x7a,0xcc,0xa3,0x00,0xe1, - 0xc4,0x58,0x10,0x08,0x01,0xff,0x7a,0xcc,0xb1,0x00,0x01,0xff,0x7a,0xcc,0xb1,0x00, - 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01,0xff,0x79,0xcc, - 0x8a,0x00,0x10,0x08,0x01,0xff,0x61,0xca,0xbe,0x00,0x02,0xff,0x73,0xcc,0x87,0x00, - 0x51,0x04,0x0a,0x00,0x10,0x07,0x0a,0xff,0x73,0x73,0x00,0x0a,0x00,0xd4,0x98,0xd3, - 0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0xa3,0x00,0x01,0xff,0x61, - 0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x89, - 0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x01,0xff,0x61, - 0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0x01, - 0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61, - 0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,0x01, - 0xff,0x61,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x83,0x00,0xd1, - 0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc,0xa3, - 0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,0x01,0xff,0x61, - 0xcc,0x86,0xcc,0x81,0x00,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x61, - 0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00,0x10,0x0a,0x01, - 0xff,0x61,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x89,0x00,0xd1, - 0x14,0x10,0x0a,0x01,0xff,0x61,0xcc,0x86,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x86, - 0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,0x01,0xff,0x61, - 0xcc,0xa3,0xcc,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0xa3, - 0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x65,0xcc,0x89,0x00,0x01, - 0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x65,0xcc,0x83,0x00,0x01, - 0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0x01, - 0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0xcf,0x86,0xe5,0x31,0x01,0xd4,0x90,0xd3,0x50, - 0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x01,0xff, - 0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00, - 0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x65,0xcc, - 
0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff, - 0x65,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82,0x00,0xd2,0x20, - 0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x89,0x00,0x01,0xff,0x69,0xcc,0x89,0x00, - 0x10,0x08,0x01,0xff,0x69,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc,0xa3,0x00,0xd1,0x10, - 0x10,0x08,0x01,0xff,0x6f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc,0xa3,0x00,0x10,0x08, - 0x01,0xff,0x6f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00,0xd3,0x50,0xd2,0x28, - 0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc, - 0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0x01,0xff, - 0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x82,0xcc, - 0x89,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc, - 0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0xd2,0x28,0xd1,0x14, - 0x10,0x0a,0x01,0xff,0x6f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x6f,0xcc,0xa3,0xcc, - 0x82,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc, - 0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00, - 0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc, - 0x89,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x89,0x00,0xd4,0x98,0xd3,0x48,0xd2,0x28, - 0xd1,0x14,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc, - 0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0x01,0xff, - 0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0xa3,0x00, - 0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x89,0x00,0x01,0xff, - 0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc, - 0x81,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc, - 0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a, - 0x01,0xff,0x75,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x89,0x00, - 0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc, - 0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x9b,0xcc, - 0xa3,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x79,0xcc, - 0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc, - 0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x79,0xcc,0x89,0x00, - 0x01,0xff,0x79,0xcc,0x89,0x00,0xd2,0x1c,0xd1,0x10,0x10,0x08,0x01,0xff,0x79,0xcc, - 0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbb,0x00, - 0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xe1,0xbb,0xbd,0x00,0x0a,0x00,0x10,0x08, - 0x0a,0xff,0xe1,0xbb,0xbf,0x00,0x0a,0x00,0xe1,0xbf,0x02,0xe0,0xa1,0x01,0xcf,0x86, - 0xd5,0xc6,0xd4,0x6c,0xd3,0x18,0xe2,0xc0,0x58,0xe1,0xa9,0x58,0x10,0x09,0x01,0xff, - 0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12, - 0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0x00, - 0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb1,0xcc, - 0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81, - 0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1, - 0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0x00,0xd3,0x18, - 0xe2,0xfc,0x58,0xe1,0xe5,0x58,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x93,0x00,0x01, - 0xff,0xce,0xb5,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5, - 
0xcc,0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb5, - 0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16, - 0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc, - 0x94,0xcc,0x81,0x00,0x00,0x00,0xd4,0x6c,0xd3,0x18,0xe2,0x26,0x59,0xe1,0x0f,0x59, - 0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0x00, - 0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce, - 0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0x00,0x01, - 0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7, - 0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b, - 0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd, - 0x82,0x00,0xd3,0x18,0xe2,0x62,0x59,0xe1,0x4b,0x59,0x10,0x09,0x01,0xff,0xce,0xb9, - 0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09, - 0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0x00,0x10,0x0b, - 0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc, - 0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x81,0x00,0x01, - 0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93, - 0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82,0x00,0xcf,0x86,0xd5,0xac, - 0xd4,0x5a,0xd3,0x18,0xe2,0x9f,0x59,0xe1,0x88,0x59,0x10,0x09,0x01,0xff,0xce,0xbf, - 0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09, - 0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0x10,0x0b, - 0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc, - 0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x81,0x00,0x01, - 0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x18,0xe2,0xc9,0x59,0xe1, - 0xb2,0x59,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff,0xcf,0x85,0xcc, - 0x94,0x00,0xd2,0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94, - 0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x0f, - 0x10,0x04,0x00,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00,0x10,0x04,0x00, - 0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00,0xe4,0x85,0x5a,0xd3,0x18,0xe2, - 0x04,0x5a,0xe1,0xed,0x59,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x93,0x00,0x01,0xff, - 0xcf,0x89,0xcc,0x94,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc, - 0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc, - 0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10, - 0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94, - 0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff, - 0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xe0,0xd9,0x02,0xcf,0x86,0xe5,0x91,0x01,0xd4, - 0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xce, - 0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce, - 0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x80, - 0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81,0xce, - 0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01, - 0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94, - 0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc, - 
0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01, - 0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94, - 0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc, - 0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10, - 0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1, - 0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01, - 0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xce,0xb9, - 0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff, - 0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce, - 0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81, - 0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00, - 0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10, - 0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94, - 0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00, - 0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01, - 0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94, - 0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82,0xce, - 0xb9,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0xce,0xb9,0x00,0xd4,0xc8,0xd3, - 0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xce,0xb9,0x00, - 0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc, - 0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0xce,0xb9, - 0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0xce,0xb9,0x00, - 0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xcf, - 0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82, - 0xce,0xb9,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xce, - 0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xce,0xb9,0x00,0x10,0x0d,0x01,0xff,0xcf, - 0x89,0xcc,0x93,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80, - 0xce,0xb9,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0xce, - 0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0xce,0xb9,0x00,0x10,0x0d,0x01, - 0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94, - 0xcd,0x82,0xce,0xb9,0x00,0xd3,0x49,0xd2,0x26,0xd1,0x12,0x10,0x09,0x01,0xff,0xce, - 0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce, - 0xb1,0xcc,0x80,0xce,0xb9,0x00,0x01,0xff,0xce,0xb1,0xce,0xb9,0x00,0xd1,0x0f,0x10, - 0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff, - 0xce,0xb1,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24, - 0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1,0xcc, - 0x84,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01,0xff,0xce,0xb1,0xcc, - 0x81,0x00,0xe1,0xa5,0x5a,0x10,0x09,0x01,0xff,0xce,0xb1,0xce,0xb9,0x00,0x01,0x00, - 0xcf,0x86,0xd5,0xbd,0xd4,0x7e,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10,0x04,0x01,0x00, - 0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x80,0xce, - 0xb9,0x00,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce, - 0xb7,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcd,0x82, - 
0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09, - 0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00,0x10,0x09, - 0x01,0xff,0xce,0xb7,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0xe1,0xb4, - 0x5a,0x10,0x09,0x01,0xff,0xce,0xb7,0xce,0xb9,0x00,0x01,0xff,0xe1,0xbe,0xbf,0xcc, - 0x80,0x00,0xd3,0x18,0xe2,0xda,0x5a,0xe1,0xc3,0x5a,0x10,0x09,0x01,0xff,0xce,0xb9, - 0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0xe2,0xfe,0x5a,0xd1,0x12,0x10, - 0x09,0x01,0xff,0xce,0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0x10, - 0x09,0x01,0xff,0xce,0xb9,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0xd4, - 0x51,0xd3,0x18,0xe2,0x21,0x5b,0xe1,0x0a,0x5b,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc, - 0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01, - 0xff,0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00,0x10,0x09,0x01, - 0xff,0xcf,0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0xe1,0x41,0x5b, - 0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x80,0x00, - 0xd3,0x3b,0xd2,0x18,0x51,0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x80, - 0xce,0xb9,0x00,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff, - 0xcf,0x89,0xcc,0x81,0xce,0xb9,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcd, - 0x82,0x00,0x01,0xff,0xcf,0x89,0xcd,0x82,0xce,0xb9,0x00,0xd2,0x24,0xd1,0x12,0x10, - 0x09,0x01,0xff,0xce,0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x10, - 0x09,0x01,0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0xe1, - 0x4b,0x5b,0x10,0x09,0x01,0xff,0xcf,0x89,0xce,0xb9,0x00,0x01,0xff,0xc2,0xb4,0x00, - 0xe0,0xa2,0x67,0xcf,0x86,0xe5,0x24,0x02,0xe4,0x26,0x01,0xe3,0x1b,0x5e,0xd2,0x2b, - 0xe1,0xf5,0x5b,0xe0,0x7a,0x5b,0xcf,0x86,0xe5,0x5f,0x5b,0x94,0x1c,0x93,0x18,0x92, - 0x14,0x91,0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01,0xff,0xe2,0x80,0x83, - 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd1,0xd6,0xd0,0x46,0xcf,0x86,0x55, - 0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10, - 0x07,0x01,0xff,0xcf,0x89,0x00,0x01,0x00,0x92,0x12,0x51,0x04,0x01,0x00,0x10,0x06, - 0x01,0xff,0x6b,0x00,0x01,0xff,0x61,0xcc,0x8a,0x00,0x01,0x00,0xe3,0xba,0x5c,0x92, - 0x10,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0x8e,0x00,0x01,0x00,0x01, - 0x00,0xcf,0x86,0xd5,0x0a,0xe4,0xd7,0x5c,0x63,0xc2,0x5c,0x06,0x00,0x94,0x80,0xd3, - 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb0,0x00,0x01,0xff,0xe2, - 0x85,0xb1,0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb2,0x00,0x01,0xff,0xe2,0x85,0xb3, - 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb4,0x00,0x01,0xff,0xe2,0x85,0xb5, - 0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xb6,0x00,0x01,0xff,0xe2,0x85,0xb7,0x00,0xd2, - 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xb8,0x00,0x01,0xff,0xe2,0x85,0xb9, - 0x00,0x10,0x08,0x01,0xff,0xe2,0x85,0xba,0x00,0x01,0xff,0xe2,0x85,0xbb,0x00,0xd1, - 0x10,0x10,0x08,0x01,0xff,0xe2,0x85,0xbc,0x00,0x01,0xff,0xe2,0x85,0xbd,0x00,0x10, - 0x08,0x01,0xff,0xe2,0x85,0xbe,0x00,0x01,0xff,0xe2,0x85,0xbf,0x00,0x01,0x00,0xe0, - 0xc9,0x5c,0xcf,0x86,0xe5,0xa8,0x5c,0xe4,0x87,0x5c,0xe3,0x76,0x5c,0xe2,0x69,0x5c, - 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0xff,0xe2,0x86,0x84,0x00,0xe3,0xb8, - 0x60,0xe2,0x85,0x60,0xd1,0x0c,0xe0,0x32,0x60,0xcf,0x86,0x65,0x13,0x60,0x01,0x00, - 0xd0,0x62,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x18,0x52,0x04, - 0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x90,0x00,0x01,0xff, - 
0xe2,0x93,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x92,0x00, - 0x01,0xff,0xe2,0x93,0x93,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x94,0x00,0x01,0xff, - 0xe2,0x93,0x95,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2,0x93,0x96,0x00,0x01,0xff, - 0xe2,0x93,0x97,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0x98,0x00,0x01,0xff,0xe2,0x93, - 0x99,0x00,0xcf,0x86,0xe5,0xec,0x5f,0x94,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10, - 0x08,0x01,0xff,0xe2,0x93,0x9a,0x00,0x01,0xff,0xe2,0x93,0x9b,0x00,0x10,0x08,0x01, - 0xff,0xe2,0x93,0x9c,0x00,0x01,0xff,0xe2,0x93,0x9d,0x00,0xd1,0x10,0x10,0x08,0x01, - 0xff,0xe2,0x93,0x9e,0x00,0x01,0xff,0xe2,0x93,0x9f,0x00,0x10,0x08,0x01,0xff,0xe2, - 0x93,0xa0,0x00,0x01,0xff,0xe2,0x93,0xa1,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, - 0xff,0xe2,0x93,0xa2,0x00,0x01,0xff,0xe2,0x93,0xa3,0x00,0x10,0x08,0x01,0xff,0xe2, - 0x93,0xa4,0x00,0x01,0xff,0xe2,0x93,0xa5,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe2, - 0x93,0xa6,0x00,0x01,0xff,0xe2,0x93,0xa7,0x00,0x10,0x08,0x01,0xff,0xe2,0x93,0xa8, - 0x00,0x01,0xff,0xe2,0x93,0xa9,0x00,0x01,0x00,0xd4,0x0c,0xe3,0xc8,0x61,0xe2,0xc1, - 0x61,0xcf,0x06,0x04,0x00,0xe3,0xa1,0x64,0xe2,0x94,0x63,0xe1,0x2e,0x02,0xe0,0x84, - 0x01,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x08,0xff,0xe2,0xb0,0xb0,0x00,0x08,0xff,0xe2,0xb0,0xb1,0x00,0x10,0x08,0x08,0xff, - 0xe2,0xb0,0xb2,0x00,0x08,0xff,0xe2,0xb0,0xb3,0x00,0xd1,0x10,0x10,0x08,0x08,0xff, - 0xe2,0xb0,0xb4,0x00,0x08,0xff,0xe2,0xb0,0xb5,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0, - 0xb6,0x00,0x08,0xff,0xe2,0xb0,0xb7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff, - 0xe2,0xb0,0xb8,0x00,0x08,0xff,0xe2,0xb0,0xb9,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0, - 0xba,0x00,0x08,0xff,0xe2,0xb0,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb0, - 0xbc,0x00,0x08,0xff,0xe2,0xb0,0xbd,0x00,0x10,0x08,0x08,0xff,0xe2,0xb0,0xbe,0x00, - 0x08,0xff,0xe2,0xb0,0xbf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff, - 0xe2,0xb1,0x80,0x00,0x08,0xff,0xe2,0xb1,0x81,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1, - 0x82,0x00,0x08,0xff,0xe2,0xb1,0x83,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1, - 0x84,0x00,0x08,0xff,0xe2,0xb1,0x85,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x86,0x00, - 0x08,0xff,0xe2,0xb1,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1, - 0x88,0x00,0x08,0xff,0xe2,0xb1,0x89,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8a,0x00, - 0x08,0xff,0xe2,0xb1,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8c,0x00, - 0x08,0xff,0xe2,0xb1,0x8d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x8e,0x00,0x08,0xff, - 0xe2,0xb1,0x8f,0x00,0x94,0x7c,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff, - 0xe2,0xb1,0x90,0x00,0x08,0xff,0xe2,0xb1,0x91,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1, - 0x92,0x00,0x08,0xff,0xe2,0xb1,0x93,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1, - 0x94,0x00,0x08,0xff,0xe2,0xb1,0x95,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x96,0x00, - 0x08,0xff,0xe2,0xb1,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1, - 0x98,0x00,0x08,0xff,0xe2,0xb1,0x99,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9a,0x00, - 0x08,0xff,0xe2,0xb1,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9c,0x00, - 0x08,0xff,0xe2,0xb1,0x9d,0x00,0x10,0x08,0x08,0xff,0xe2,0xb1,0x9e,0x00,0x00,0x00, - 0x08,0x00,0xcf,0x86,0xd5,0x07,0x64,0x84,0x61,0x08,0x00,0xd4,0x63,0xd3,0x32,0xd2, - 0x1b,0xd1,0x0c,0x10,0x08,0x09,0xff,0xe2,0xb1,0xa1,0x00,0x09,0x00,0x10,0x07,0x09, - 0xff,0xc9,0xab,0x00,0x09,0xff,0xe1,0xb5,0xbd,0x00,0xd1,0x0b,0x10,0x07,0x09,0xff, - 0xc9,0xbd,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xa8,0x00,0xd2, - 
0x18,0xd1,0x0c,0x10,0x04,0x09,0x00,0x09,0xff,0xe2,0xb1,0xaa,0x00,0x10,0x04,0x09, - 0x00,0x09,0xff,0xe2,0xb1,0xac,0x00,0xd1,0x0b,0x10,0x04,0x09,0x00,0x0a,0xff,0xc9, - 0x91,0x00,0x10,0x07,0x0a,0xff,0xc9,0xb1,0x00,0x0a,0xff,0xc9,0x90,0x00,0xd3,0x27, - 0xd2,0x17,0xd1,0x0b,0x10,0x07,0x0b,0xff,0xc9,0x92,0x00,0x0a,0x00,0x10,0x08,0x0a, - 0xff,0xe2,0xb1,0xb3,0x00,0x0a,0x00,0x91,0x0c,0x10,0x04,0x09,0x00,0x09,0xff,0xe2, - 0xb1,0xb6,0x00,0x09,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x07,0x0b, - 0xff,0xc8,0xbf,0x00,0x0b,0xff,0xc9,0x80,0x00,0xe0,0x83,0x01,0xcf,0x86,0xd5,0xc0, - 0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x81,0x00, - 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x83,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08, - 0x08,0xff,0xe2,0xb2,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x87,0x00, - 0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x89,0x00,0x08,0x00, - 0x10,0x08,0x08,0xff,0xe2,0xb2,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff, - 0xe2,0xb2,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x8f,0x00,0x08,0x00, - 0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x91,0x00,0x08,0x00, - 0x10,0x08,0x08,0xff,0xe2,0xb2,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff, - 0xe2,0xb2,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x97,0x00,0x08,0x00, - 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0x99,0x00,0x08,0x00,0x10,0x08, - 0x08,0xff,0xe2,0xb2,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2, - 0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0x9f,0x00,0x08,0x00,0xd4,0x60, - 0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa1,0x00,0x08,0x00, - 0x10,0x08,0x08,0xff,0xe2,0xb2,0xa3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff, - 0xe2,0xb2,0xa5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa7,0x00,0x08,0x00, - 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xa9,0x00,0x08,0x00,0x10,0x08, - 0x08,0xff,0xe2,0xb2,0xab,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2, - 0xad,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xaf,0x00,0x08,0x00,0xd3,0x30, - 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb1,0x00,0x08,0x00,0x10,0x08, - 0x08,0xff,0xe2,0xb2,0xb3,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2, - 0xb5,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb7,0x00,0x08,0x00,0xd2,0x18, - 0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xb9,0x00,0x08,0x00,0x10,0x08,0x08,0xff, - 0xe2,0xb2,0xbb,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb2,0xbd,0x00, - 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb2,0xbf,0x00,0x08,0x00,0xcf,0x86,0xd5,0xc0, - 0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x81,0x00, - 0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x83,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08, - 0x08,0xff,0xe2,0xb3,0x85,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x87,0x00, - 0x08,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x89,0x00,0x08,0x00, - 0x10,0x08,0x08,0xff,0xe2,0xb3,0x8b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff, - 0xe2,0xb3,0x8d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x8f,0x00,0x08,0x00, - 0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x91,0x00,0x08,0x00, - 0x10,0x08,0x08,0xff,0xe2,0xb3,0x93,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff, - 0xe2,0xb3,0x95,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x97,0x00,0x08,0x00, - 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0x99,0x00,0x08,0x00,0x10,0x08, - 0x08,0xff,0xe2,0xb3,0x9b,0x00,0x08,0x00,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3, - 
0x9d,0x00,0x08,0x00,0x10,0x08,0x08,0xff,0xe2,0xb3,0x9f,0x00,0x08,0x00,0xd4,0x3b, - 0xd3,0x1c,0x92,0x18,0xd1,0x0c,0x10,0x08,0x08,0xff,0xe2,0xb3,0xa1,0x00,0x08,0x00, - 0x10,0x08,0x08,0xff,0xe2,0xb3,0xa3,0x00,0x08,0x00,0x08,0x00,0xd2,0x10,0x51,0x04, - 0x08,0x00,0x10,0x04,0x08,0x00,0x0b,0xff,0xe2,0xb3,0xac,0x00,0xe1,0xd0,0x5e,0x10, - 0x04,0x0b,0x00,0x0b,0xff,0xe2,0xb3,0xae,0x00,0xe3,0xd5,0x5e,0x92,0x10,0x51,0x04, - 0x0b,0xe6,0x10,0x08,0x0d,0xff,0xe2,0xb3,0xb3,0x00,0x0d,0x00,0x00,0x00,0xe2,0x98, - 0x08,0xd1,0x0b,0xe0,0x8d,0x66,0xcf,0x86,0xcf,0x06,0x01,0x00,0xe0,0xe1,0x6b,0xcf, - 0x86,0xe5,0xa7,0x05,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x0c,0xe2,0x74,0x67,0xe1, - 0x0b,0x67,0xcf,0x06,0x04,0x00,0xe2,0xdb,0x01,0xe1,0x26,0x01,0xd0,0x09,0xcf,0x86, - 0x65,0x70,0x67,0x0a,0x00,0xcf,0x86,0xd5,0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1, - 0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea, - 0x99,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x85,0x00,0x0a, - 0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10, - 0x08,0x0a,0xff,0xea,0x99,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x8b, - 0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x8d,0x00,0x0a,0x00,0x10, - 0x08,0x0a,0xff,0xea,0x99,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10, - 0x08,0x0a,0xff,0xea,0x99,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x93, - 0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x95,0x00,0x0a,0x00,0x10, - 0x08,0x0a,0xff,0xea,0x99,0x97,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a, - 0xff,0xea,0x99,0x99,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0x9b,0x00,0x0a, - 0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0x9d,0x00,0x0a,0x00,0x10,0x08,0x0a, - 0xff,0xea,0x99,0x9f,0x00,0x0a,0x00,0xe4,0xd9,0x66,0xd3,0x30,0xd2,0x18,0xd1,0x0c, - 0x10,0x08,0x0c,0xff,0xea,0x99,0xa1,0x00,0x0c,0x00,0x10,0x08,0x0a,0xff,0xea,0x99, - 0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x99,0xa5,0x00,0x0a,0x00, - 0x10,0x08,0x0a,0xff,0xea,0x99,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08, - 0x0a,0xff,0xea,0x99,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x99,0xab,0x00, - 0x0a,0x00,0xe1,0x88,0x66,0x10,0x08,0x0a,0xff,0xea,0x99,0xad,0x00,0x0a,0x00,0xe0, - 0xb1,0x66,0xcf,0x86,0x95,0xab,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08, - 0x0a,0xff,0xea,0x9a,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x83,0x00, - 0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x85,0x00,0x0a,0x00,0x10,0x08, - 0x0a,0xff,0xea,0x9a,0x87,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff, - 0xea,0x9a,0x89,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8b,0x00,0x0a,0x00, - 0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x8d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff, - 0xea,0x9a,0x8f,0x00,0x0a,0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff, - 0xea,0x9a,0x91,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9a,0x93,0x00,0x0a,0x00, - 0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9a,0x95,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff, - 0xea,0x9a,0x97,0x00,0x0a,0x00,0xe2,0x0e,0x66,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea, - 0x9a,0x99,0x00,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9a,0x9b,0x00,0x10,0x00,0x0b, - 0x00,0xe1,0x10,0x02,0xd0,0xb9,0xcf,0x86,0xd5,0x07,0x64,0x1a,0x66,0x08,0x00,0xd4, - 0x58,0xd3,0x28,0xd2,0x10,0x51,0x04,0x09,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa3, - 0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xa5,0x00,0x0a,0x00,0x10, - 0x08,0x0a,0xff,0xea,0x9c,0xa7,0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a, - 
0xff,0xea,0x9c,0xa9,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xab,0x00,0x0a, - 0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xad,0x00,0x0a,0x00,0x10,0x08,0x0a, - 0xff,0xea,0x9c,0xaf,0x00,0x0a,0x00,0xd3,0x28,0xd2,0x10,0x51,0x04,0x0a,0x00,0x10, - 0x08,0x0a,0xff,0xea,0x9c,0xb3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea, - 0x9c,0xb5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb7,0x00,0x0a,0x00,0xd2, - 0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xb9,0x00,0x0a,0x00,0x10,0x08,0x0a, - 0xff,0xea,0x9c,0xbb,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9c,0xbd, - 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9c,0xbf,0x00,0x0a,0x00,0xcf,0x86,0xd5, - 0xc0,0xd4,0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x81, - 0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10, - 0x08,0x0a,0xff,0xea,0x9d,0x85,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x87, - 0x00,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x89,0x00,0x0a, - 0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a, - 0xff,0xea,0x9d,0x8d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x8f,0x00,0x0a, - 0x00,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x91,0x00,0x0a, - 0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x93,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a, - 0xff,0xea,0x9d,0x95,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x97,0x00,0x0a, - 0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0x99,0x00,0x0a,0x00,0x10, - 0x08,0x0a,0xff,0xea,0x9d,0x9b,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea, - 0x9d,0x9d,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0x9f,0x00,0x0a,0x00,0xd4, - 0x60,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa1,0x00,0x0a, - 0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa3,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a, - 0xff,0xea,0x9d,0xa5,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa7,0x00,0x0a, - 0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9d,0xa9,0x00,0x0a,0x00,0x10, - 0x08,0x0a,0xff,0xea,0x9d,0xab,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea, - 0x9d,0xad,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xaf,0x00,0x0a,0x00,0x53, - 0x04,0x0a,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9d,0xba, - 0x00,0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9d,0xbc,0x00,0xd1,0x0c,0x10,0x04,0x0a, - 0x00,0x0a,0xff,0xe1,0xb5,0xb9,0x00,0x10,0x08,0x0a,0xff,0xea,0x9d,0xbf,0x00,0x0a, - 0x00,0xe0,0x71,0x01,0xcf,0x86,0xd5,0xa6,0xd4,0x4e,0xd3,0x30,0xd2,0x18,0xd1,0x0c, - 0x10,0x08,0x0a,0xff,0xea,0x9e,0x81,0x00,0x0a,0x00,0x10,0x08,0x0a,0xff,0xea,0x9e, - 0x83,0x00,0x0a,0x00,0xd1,0x0c,0x10,0x08,0x0a,0xff,0xea,0x9e,0x85,0x00,0x0a,0x00, - 0x10,0x08,0x0a,0xff,0xea,0x9e,0x87,0x00,0x0a,0x00,0xd2,0x10,0x51,0x04,0x0a,0x00, - 0x10,0x04,0x0a,0x00,0x0a,0xff,0xea,0x9e,0x8c,0x00,0xe1,0x16,0x64,0x10,0x04,0x0a, - 0x00,0x0c,0xff,0xc9,0xa5,0x00,0xd3,0x28,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0c,0xff, - 0xea,0x9e,0x91,0x00,0x0c,0x00,0x10,0x08,0x0d,0xff,0xea,0x9e,0x93,0x00,0x0d,0x00, - 0x51,0x04,0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x97,0x00,0x10,0x00,0xd2,0x18, - 0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e,0x99,0x00,0x10,0x00,0x10,0x08,0x10,0xff, - 0xea,0x9e,0x9b,0x00,0x10,0x00,0xd1,0x0c,0x10,0x08,0x10,0xff,0xea,0x9e,0x9d,0x00, - 0x10,0x00,0x10,0x08,0x10,0xff,0xea,0x9e,0x9f,0x00,0x10,0x00,0xd4,0x63,0xd3,0x30, - 0xd2,0x18,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa1,0x00,0x0c,0x00,0x10,0x08, - 0x0c,0xff,0xea,0x9e,0xa3,0x00,0x0c,0x00,0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e, - 
0xa5,0x00,0x0c,0x00,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa7,0x00,0x0c,0x00,0xd2,0x1a, - 0xd1,0x0c,0x10,0x08,0x0c,0xff,0xea,0x9e,0xa9,0x00,0x0c,0x00,0x10,0x07,0x0d,0xff, - 0xc9,0xa6,0x00,0x10,0xff,0xc9,0x9c,0x00,0xd1,0x0e,0x10,0x07,0x10,0xff,0xc9,0xa1, - 0x00,0x10,0xff,0xc9,0xac,0x00,0x10,0x07,0x12,0xff,0xc9,0xaa,0x00,0x14,0x00,0xd3, - 0x35,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x10,0xff,0xca,0x9e,0x00,0x10,0xff,0xca,0x87, - 0x00,0x10,0x07,0x11,0xff,0xca,0x9d,0x00,0x11,0xff,0xea,0xad,0x93,0x00,0xd1,0x0c, - 0x10,0x08,0x11,0xff,0xea,0x9e,0xb5,0x00,0x11,0x00,0x10,0x08,0x11,0xff,0xea,0x9e, - 0xb7,0x00,0x11,0x00,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x14,0xff,0xea,0x9e,0xb9,0x00, - 0x14,0x00,0x10,0x08,0x15,0xff,0xea,0x9e,0xbb,0x00,0x15,0x00,0xd1,0x0c,0x10,0x08, - 0x15,0xff,0xea,0x9e,0xbd,0x00,0x15,0x00,0x10,0x08,0x15,0xff,0xea,0x9e,0xbf,0x00, - 0x15,0x00,0xcf,0x86,0xe5,0x50,0x63,0x94,0x2f,0x93,0x2b,0xd2,0x10,0x51,0x04,0x00, - 0x00,0x10,0x08,0x15,0xff,0xea,0x9f,0x83,0x00,0x15,0x00,0xd1,0x0f,0x10,0x08,0x15, - 0xff,0xea,0x9e,0x94,0x00,0x15,0xff,0xca,0x82,0x00,0x10,0x08,0x15,0xff,0xe1,0xb6, - 0x8e,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe4,0x30,0x66,0xd3,0x1d,0xe2,0xd7,0x63, - 0xe1,0x86,0x63,0xe0,0x73,0x63,0xcf,0x86,0xe5,0x54,0x63,0x94,0x0b,0x93,0x07,0x62, - 0x3f,0x63,0x08,0x00,0x08,0x00,0x08,0x00,0xd2,0x0f,0xe1,0xd6,0x64,0xe0,0xa3,0x64, - 0xcf,0x86,0x65,0x88,0x64,0x0a,0x00,0xd1,0xab,0xd0,0x1a,0xcf,0x86,0xe5,0x93,0x65, - 0xe4,0x76,0x65,0xe3,0x5d,0x65,0xe2,0x50,0x65,0x91,0x08,0x10,0x04,0x00,0x00,0x0c, - 0x00,0x0c,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x0b,0x93,0x07,0x62,0xa3,0x65, - 0x11,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e, - 0xa0,0x00,0x11,0xff,0xe1,0x8e,0xa1,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa2,0x00, - 0x11,0xff,0xe1,0x8e,0xa3,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa4,0x00, - 0x11,0xff,0xe1,0x8e,0xa5,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa6,0x00,0x11,0xff, - 0xe1,0x8e,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xa8,0x00, - 0x11,0xff,0xe1,0x8e,0xa9,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xaa,0x00,0x11,0xff, - 0xe1,0x8e,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xac,0x00,0x11,0xff, - 0xe1,0x8e,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8e,0xae,0x00,0x11,0xff,0xe1,0x8e, - 0xaf,0x00,0xe0,0x2e,0x65,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20, - 0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8e,0xb0,0x00,0x11,0xff,0xe1,0x8e,0xb1,0x00, - 0x10,0x08,0x11,0xff,0xe1,0x8e,0xb2,0x00,0x11,0xff,0xe1,0x8e,0xb3,0x00,0xd1,0x10, - 0x10,0x08,0x11,0xff,0xe1,0x8e,0xb4,0x00,0x11,0xff,0xe1,0x8e,0xb5,0x00,0x10,0x08, - 0x11,0xff,0xe1,0x8e,0xb6,0x00,0x11,0xff,0xe1,0x8e,0xb7,0x00,0xd2,0x20,0xd1,0x10, - 0x10,0x08,0x11,0xff,0xe1,0x8e,0xb8,0x00,0x11,0xff,0xe1,0x8e,0xb9,0x00,0x10,0x08, - 0x11,0xff,0xe1,0x8e,0xba,0x00,0x11,0xff,0xe1,0x8e,0xbb,0x00,0xd1,0x10,0x10,0x08, - 0x11,0xff,0xe1,0x8e,0xbc,0x00,0x11,0xff,0xe1,0x8e,0xbd,0x00,0x10,0x08,0x11,0xff, - 0xe1,0x8e,0xbe,0x00,0x11,0xff,0xe1,0x8e,0xbf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10, - 0x10,0x08,0x11,0xff,0xe1,0x8f,0x80,0x00,0x11,0xff,0xe1,0x8f,0x81,0x00,0x10,0x08, - 0x11,0xff,0xe1,0x8f,0x82,0x00,0x11,0xff,0xe1,0x8f,0x83,0x00,0xd1,0x10,0x10,0x08, - 0x11,0xff,0xe1,0x8f,0x84,0x00,0x11,0xff,0xe1,0x8f,0x85,0x00,0x10,0x08,0x11,0xff, - 0xe1,0x8f,0x86,0x00,0x11,0xff,0xe1,0x8f,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x11,0xff,0xe1,0x8f,0x88,0x00,0x11,0xff,0xe1,0x8f,0x89,0x00,0x10,0x08,0x11,0xff, - 0xe1,0x8f,0x8a,0x00,0x11,0xff,0xe1,0x8f,0x8b,0x00,0xd1,0x10,0x10,0x08,0x11,0xff, - 
0xe1,0x8f,0x8c,0x00,0x11,0xff,0xe1,0x8f,0x8d,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f, - 0x8e,0x00,0x11,0xff,0xe1,0x8f,0x8f,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10, - 0x10,0x08,0x11,0xff,0xe1,0x8f,0x90,0x00,0x11,0xff,0xe1,0x8f,0x91,0x00,0x10,0x08, - 0x11,0xff,0xe1,0x8f,0x92,0x00,0x11,0xff,0xe1,0x8f,0x93,0x00,0xd1,0x10,0x10,0x08, - 0x11,0xff,0xe1,0x8f,0x94,0x00,0x11,0xff,0xe1,0x8f,0x95,0x00,0x10,0x08,0x11,0xff, - 0xe1,0x8f,0x96,0x00,0x11,0xff,0xe1,0x8f,0x97,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x11,0xff,0xe1,0x8f,0x98,0x00,0x11,0xff,0xe1,0x8f,0x99,0x00,0x10,0x08,0x11,0xff, - 0xe1,0x8f,0x9a,0x00,0x11,0xff,0xe1,0x8f,0x9b,0x00,0xd1,0x10,0x10,0x08,0x11,0xff, - 0xe1,0x8f,0x9c,0x00,0x11,0xff,0xe1,0x8f,0x9d,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f, - 0x9e,0x00,0x11,0xff,0xe1,0x8f,0x9f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x11,0xff,0xe1,0x8f,0xa0,0x00,0x11,0xff,0xe1,0x8f,0xa1,0x00,0x10,0x08,0x11,0xff, - 0xe1,0x8f,0xa2,0x00,0x11,0xff,0xe1,0x8f,0xa3,0x00,0xd1,0x10,0x10,0x08,0x11,0xff, - 0xe1,0x8f,0xa4,0x00,0x11,0xff,0xe1,0x8f,0xa5,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f, - 0xa6,0x00,0x11,0xff,0xe1,0x8f,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x11,0xff, - 0xe1,0x8f,0xa8,0x00,0x11,0xff,0xe1,0x8f,0xa9,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f, - 0xaa,0x00,0x11,0xff,0xe1,0x8f,0xab,0x00,0xd1,0x10,0x10,0x08,0x11,0xff,0xe1,0x8f, - 0xac,0x00,0x11,0xff,0xe1,0x8f,0xad,0x00,0x10,0x08,0x11,0xff,0xe1,0x8f,0xae,0x00, - 0x11,0xff,0xe1,0x8f,0xaf,0x00,0xd1,0x0c,0xe0,0x67,0x63,0xcf,0x86,0xcf,0x06,0x02, - 0xff,0xff,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06, - 0x01,0x00,0xd4,0xae,0xd3,0x09,0xe2,0xd0,0x63,0xcf,0x06,0x01,0x00,0xd2,0x27,0xe1, - 0x9b,0x6f,0xe0,0xa2,0x6d,0xcf,0x86,0xe5,0xbb,0x6c,0xe4,0x4a,0x6c,0xe3,0x15,0x6c, - 0xe2,0xf4,0x6b,0xe1,0xe3,0x6b,0x10,0x08,0x01,0xff,0xe5,0x88,0x87,0x00,0x01,0xff, - 0xe5,0xba,0xa6,0x00,0xe1,0xf0,0x73,0xe0,0x64,0x73,0xcf,0x86,0xe5,0x9e,0x72,0xd4, - 0x3b,0x93,0x37,0xd2,0x1d,0xd1,0x0e,0x10,0x07,0x01,0xff,0x66,0x66,0x00,0x01,0xff, - 0x66,0x69,0x00,0x10,0x07,0x01,0xff,0x66,0x6c,0x00,0x01,0xff,0x66,0x66,0x69,0x00, - 0xd1,0x0f,0x10,0x08,0x01,0xff,0x66,0x66,0x6c,0x00,0x01,0xff,0x73,0x74,0x00,0x10, - 0x07,0x01,0xff,0x73,0x74,0x00,0x00,0x00,0x00,0x00,0xe3,0x44,0x72,0xd2,0x11,0x51, - 0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xb6,0x00,0xd1,0x12, - 0x10,0x09,0x01,0xff,0xd5,0xb4,0xd5,0xa5,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xab,0x00, - 0x10,0x09,0x01,0xff,0xd5,0xbe,0xd5,0xb6,0x00,0x01,0xff,0xd5,0xb4,0xd5,0xad,0x00, - 0xd3,0x09,0xe2,0xbc,0x73,0xcf,0x06,0x01,0x00,0xd2,0x12,0xe1,0xab,0x74,0xe0,0x3c, - 0x74,0xcf,0x86,0xe5,0x19,0x74,0x64,0x08,0x74,0x06,0x00,0xe1,0x11,0x75,0xe0,0xde, - 0x74,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00, - 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x7c,0xd3,0x3c,0xd2, - 0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xef,0xbd,0x81,0x00,0x10,0x08,0x01, - 0xff,0xef,0xbd,0x82,0x00,0x01,0xff,0xef,0xbd,0x83,0x00,0xd1,0x10,0x10,0x08,0x01, - 0xff,0xef,0xbd,0x84,0x00,0x01,0xff,0xef,0xbd,0x85,0x00,0x10,0x08,0x01,0xff,0xef, - 0xbd,0x86,0x00,0x01,0xff,0xef,0xbd,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, - 0xff,0xef,0xbd,0x88,0x00,0x01,0xff,0xef,0xbd,0x89,0x00,0x10,0x08,0x01,0xff,0xef, - 0xbd,0x8a,0x00,0x01,0xff,0xef,0xbd,0x8b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xef, - 0xbd,0x8c,0x00,0x01,0xff,0xef,0xbd,0x8d,0x00,0x10,0x08,0x01,0xff,0xef,0xbd,0x8e, - 0x00,0x01,0xff,0xef,0xbd,0x8f,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, - 
0xff,0xef,0xbd,0x90,0x00,0x01,0xff,0xef,0xbd,0x91,0x00,0x10,0x08,0x01,0xff,0xef, - 0xbd,0x92,0x00,0x01,0xff,0xef,0xbd,0x93,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xef, - 0xbd,0x94,0x00,0x01,0xff,0xef,0xbd,0x95,0x00,0x10,0x08,0x01,0xff,0xef,0xbd,0x96, - 0x00,0x01,0xff,0xef,0xbd,0x97,0x00,0x92,0x1c,0xd1,0x10,0x10,0x08,0x01,0xff,0xef, - 0xbd,0x98,0x00,0x01,0xff,0xef,0xbd,0x99,0x00,0x10,0x08,0x01,0xff,0xef,0xbd,0x9a, - 0x00,0x01,0x00,0x01,0x00,0x83,0xe2,0xd9,0xb2,0xe1,0xc3,0xaf,0xe0,0x40,0xae,0xcf, - 0x86,0xe5,0xe4,0x9a,0xc4,0xe3,0xc1,0x07,0xe2,0x62,0x06,0xe1,0x79,0x85,0xe0,0x09, - 0x05,0xcf,0x86,0xe5,0xfb,0x02,0xd4,0x1c,0xe3,0xe7,0x75,0xe2,0x3e,0x75,0xe1,0x19, - 0x75,0xe0,0xf2,0x74,0xcf,0x86,0xe5,0xbf,0x74,0x94,0x07,0x63,0xaa,0x74,0x07,0x00, - 0x07,0x00,0xe3,0x93,0x77,0xe2,0x58,0x77,0xe1,0x77,0x01,0xe0,0xf0,0x76,0xcf,0x86, - 0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0, - 0x90,0x90,0xa8,0x00,0x05,0xff,0xf0,0x90,0x90,0xa9,0x00,0x10,0x09,0x05,0xff,0xf0, - 0x90,0x90,0xaa,0x00,0x05,0xff,0xf0,0x90,0x90,0xab,0x00,0xd1,0x12,0x10,0x09,0x05, - 0xff,0xf0,0x90,0x90,0xac,0x00,0x05,0xff,0xf0,0x90,0x90,0xad,0x00,0x10,0x09,0x05, - 0xff,0xf0,0x90,0x90,0xae,0x00,0x05,0xff,0xf0,0x90,0x90,0xaf,0x00,0xd2,0x24,0xd1, - 0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb0,0x00,0x05,0xff,0xf0,0x90,0x90,0xb1, - 0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb2,0x00,0x05,0xff,0xf0,0x90,0x90,0xb3, - 0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb4,0x00,0x05,0xff,0xf0,0x90, - 0x90,0xb5,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90,0xb6,0x00,0x05,0xff,0xf0,0x90, - 0x90,0xb7,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x90, - 0xb8,0x00,0x05,0xff,0xf0,0x90,0x90,0xb9,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x90, - 0xba,0x00,0x05,0xff,0xf0,0x90,0x90,0xbb,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0, - 0x90,0x90,0xbc,0x00,0x05,0xff,0xf0,0x90,0x90,0xbd,0x00,0x10,0x09,0x05,0xff,0xf0, - 0x90,0x90,0xbe,0x00,0x05,0xff,0xf0,0x90,0x90,0xbf,0x00,0xd2,0x24,0xd1,0x12,0x10, - 0x09,0x05,0xff,0xf0,0x90,0x91,0x80,0x00,0x05,0xff,0xf0,0x90,0x91,0x81,0x00,0x10, - 0x09,0x05,0xff,0xf0,0x90,0x91,0x82,0x00,0x05,0xff,0xf0,0x90,0x91,0x83,0x00,0xd1, - 0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x84,0x00,0x05,0xff,0xf0,0x90,0x91,0x85, - 0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91,0x86,0x00,0x05,0xff,0xf0,0x90,0x91,0x87, - 0x00,0x94,0x4c,0x93,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0,0x90,0x91, - 0x88,0x00,0x05,0xff,0xf0,0x90,0x91,0x89,0x00,0x10,0x09,0x05,0xff,0xf0,0x90,0x91, - 0x8a,0x00,0x05,0xff,0xf0,0x90,0x91,0x8b,0x00,0xd1,0x12,0x10,0x09,0x05,0xff,0xf0, - 0x90,0x91,0x8c,0x00,0x05,0xff,0xf0,0x90,0x91,0x8d,0x00,0x10,0x09,0x07,0xff,0xf0, - 0x90,0x91,0x8e,0x00,0x07,0xff,0xf0,0x90,0x91,0x8f,0x00,0x05,0x00,0x05,0x00,0xd0, - 0xa0,0xcf,0x86,0xd5,0x07,0x64,0x98,0x75,0x07,0x00,0xd4,0x07,0x63,0xa5,0x75,0x07, - 0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0x98,0x00, - 0x12,0xff,0xf0,0x90,0x93,0x99,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0x9a,0x00, - 0x12,0xff,0xf0,0x90,0x93,0x9b,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93, - 0x9c,0x00,0x12,0xff,0xf0,0x90,0x93,0x9d,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93, - 0x9e,0x00,0x12,0xff,0xf0,0x90,0x93,0x9f,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12, - 0xff,0xf0,0x90,0x93,0xa0,0x00,0x12,0xff,0xf0,0x90,0x93,0xa1,0x00,0x10,0x09,0x12, - 0xff,0xf0,0x90,0x93,0xa2,0x00,0x12,0xff,0xf0,0x90,0x93,0xa3,0x00,0xd1,0x12,0x10, - 0x09,0x12,0xff,0xf0,0x90,0x93,0xa4,0x00,0x12,0xff,0xf0,0x90,0x93,0xa5,0x00,0x10, - 
0x09,0x12,0xff,0xf0,0x90,0x93,0xa6,0x00,0x12,0xff,0xf0,0x90,0x93,0xa7,0x00,0xcf, - 0x86,0xe5,0x2e,0x75,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff, - 0xf0,0x90,0x93,0xa8,0x00,0x12,0xff,0xf0,0x90,0x93,0xa9,0x00,0x10,0x09,0x12,0xff, - 0xf0,0x90,0x93,0xaa,0x00,0x12,0xff,0xf0,0x90,0x93,0xab,0x00,0xd1,0x12,0x10,0x09, - 0x12,0xff,0xf0,0x90,0x93,0xac,0x00,0x12,0xff,0xf0,0x90,0x93,0xad,0x00,0x10,0x09, - 0x12,0xff,0xf0,0x90,0x93,0xae,0x00,0x12,0xff,0xf0,0x90,0x93,0xaf,0x00,0xd2,0x24, - 0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb0,0x00,0x12,0xff,0xf0,0x90,0x93, - 0xb1,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb2,0x00,0x12,0xff,0xf0,0x90,0x93, - 0xb3,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb4,0x00,0x12,0xff,0xf0, - 0x90,0x93,0xb5,0x00,0x10,0x09,0x12,0xff,0xf0,0x90,0x93,0xb6,0x00,0x12,0xff,0xf0, - 0x90,0x93,0xb7,0x00,0x93,0x28,0x92,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x90, - 0x93,0xb8,0x00,0x12,0xff,0xf0,0x90,0x93,0xb9,0x00,0x10,0x09,0x12,0xff,0xf0,0x90, - 0x93,0xba,0x00,0x12,0xff,0xf0,0x90,0x93,0xbb,0x00,0x00,0x00,0x12,0x00,0xd4,0x1f, - 0xe3,0x47,0x76,0xe2,0xd2,0x75,0xe1,0x71,0x75,0xe0,0x52,0x75,0xcf,0x86,0xe5,0x1f, - 0x75,0x94,0x0a,0xe3,0x0a,0x75,0x62,0x01,0x75,0x07,0x00,0x07,0x00,0xe3,0x46,0x78, - 0xe2,0x17,0x78,0xd1,0x09,0xe0,0xb4,0x77,0xcf,0x06,0x0b,0x00,0xe0,0xe7,0x77,0xcf, - 0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff, - 0xf0,0x90,0xb3,0x80,0x00,0x11,0xff,0xf0,0x90,0xb3,0x81,0x00,0x10,0x09,0x11,0xff, - 0xf0,0x90,0xb3,0x82,0x00,0x11,0xff,0xf0,0x90,0xb3,0x83,0x00,0xd1,0x12,0x10,0x09, - 0x11,0xff,0xf0,0x90,0xb3,0x84,0x00,0x11,0xff,0xf0,0x90,0xb3,0x85,0x00,0x10,0x09, - 0x11,0xff,0xf0,0x90,0xb3,0x86,0x00,0x11,0xff,0xf0,0x90,0xb3,0x87,0x00,0xd2,0x24, - 0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x88,0x00,0x11,0xff,0xf0,0x90,0xb3, - 0x89,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8a,0x00,0x11,0xff,0xf0,0x90,0xb3, - 0x8b,0x00,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8c,0x00,0x11,0xff,0xf0, - 0x90,0xb3,0x8d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x8e,0x00,0x11,0xff,0xf0, - 0x90,0xb3,0x8f,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90, - 0xb3,0x90,0x00,0x11,0xff,0xf0,0x90,0xb3,0x91,0x00,0x10,0x09,0x11,0xff,0xf0,0x90, - 0xb3,0x92,0x00,0x11,0xff,0xf0,0x90,0xb3,0x93,0x00,0xd1,0x12,0x10,0x09,0x11,0xff, - 0xf0,0x90,0xb3,0x94,0x00,0x11,0xff,0xf0,0x90,0xb3,0x95,0x00,0x10,0x09,0x11,0xff, - 0xf0,0x90,0xb3,0x96,0x00,0x11,0xff,0xf0,0x90,0xb3,0x97,0x00,0xd2,0x24,0xd1,0x12, - 0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x98,0x00,0x11,0xff,0xf0,0x90,0xb3,0x99,0x00, - 0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9a,0x00,0x11,0xff,0xf0,0x90,0xb3,0x9b,0x00, - 0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9c,0x00,0x11,0xff,0xf0,0x90,0xb3, - 0x9d,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0x9e,0x00,0x11,0xff,0xf0,0x90,0xb3, - 0x9f,0x00,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90, - 0xb3,0xa0,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa1,0x00,0x10,0x09,0x11,0xff,0xf0,0x90, - 0xb3,0xa2,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa3,0x00,0xd1,0x12,0x10,0x09,0x11,0xff, - 0xf0,0x90,0xb3,0xa4,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa5,0x00,0x10,0x09,0x11,0xff, - 0xf0,0x90,0xb3,0xa6,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa7,0x00,0xd2,0x24,0xd1,0x12, - 0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xa8,0x00,0x11,0xff,0xf0,0x90,0xb3,0xa9,0x00, - 0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xaa,0x00,0x11,0xff,0xf0,0x90,0xb3,0xab,0x00, - 0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xac,0x00,0x11,0xff,0xf0,0x90,0xb3, - 
0xad,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xae,0x00,0x11,0xff,0xf0,0x90,0xb3, - 0xaf,0x00,0x93,0x23,0x92,0x1f,0xd1,0x12,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xb0, - 0x00,0x11,0xff,0xf0,0x90,0xb3,0xb1,0x00,0x10,0x09,0x11,0xff,0xf0,0x90,0xb3,0xb2, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x15,0xe4,0xf9,0x7a,0xe3,0x03, - 0x79,0xe2,0xfc,0x77,0xe1,0x4c,0x77,0xe0,0x05,0x77,0xcf,0x06,0x0c,0x00,0xe4,0x53, - 0x7e,0xe3,0xac,0x7d,0xe2,0x55,0x7d,0xd1,0x0c,0xe0,0x1a,0x7d,0xcf,0x86,0x65,0xfb, - 0x7c,0x14,0x00,0xe0,0x1e,0x7d,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4,0x90,0xd3,0x48, - 0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x80,0x00,0x10,0xff,0xf0, - 0x91,0xa3,0x81,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x82,0x00,0x10,0xff,0xf0, - 0x91,0xa3,0x83,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x84,0x00,0x10, - 0xff,0xf0,0x91,0xa3,0x85,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x86,0x00,0x10, - 0xff,0xf0,0x91,0xa3,0x87,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91, - 0xa3,0x88,0x00,0x10,0xff,0xf0,0x91,0xa3,0x89,0x00,0x10,0x09,0x10,0xff,0xf0,0x91, - 0xa3,0x8a,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8b,0x00,0xd1,0x12,0x10,0x09,0x10,0xff, - 0xf0,0x91,0xa3,0x8c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8d,0x00,0x10,0x09,0x10,0xff, - 0xf0,0x91,0xa3,0x8e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x8f,0x00,0xd3,0x48,0xd2,0x24, - 0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x90,0x00,0x10,0xff,0xf0,0x91,0xa3, - 0x91,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x92,0x00,0x10,0xff,0xf0,0x91,0xa3, - 0x93,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x94,0x00,0x10,0xff,0xf0, - 0x91,0xa3,0x95,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x96,0x00,0x10,0xff,0xf0, - 0x91,0xa3,0x97,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x98, - 0x00,0x10,0xff,0xf0,0x91,0xa3,0x99,0x00,0x10,0x09,0x10,0xff,0xf0,0x91,0xa3,0x9a, - 0x00,0x10,0xff,0xf0,0x91,0xa3,0x9b,0x00,0xd1,0x12,0x10,0x09,0x10,0xff,0xf0,0x91, - 0xa3,0x9c,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9d,0x00,0x10,0x09,0x10,0xff,0xf0,0x91, - 0xa3,0x9e,0x00,0x10,0xff,0xf0,0x91,0xa3,0x9f,0x00,0xd1,0x11,0xe0,0x7a,0x80,0xcf, - 0x86,0xe5,0x71,0x80,0xe4,0x3a,0x80,0xcf,0x06,0x00,0x00,0xe0,0x43,0x82,0xcf,0x86, - 0xd5,0x06,0xcf,0x06,0x00,0x00,0xd4,0x09,0xe3,0x78,0x80,0xcf,0x06,0x0c,0x00,0xd3, - 0x06,0xcf,0x06,0x00,0x00,0xe2,0xa3,0x81,0xe1,0x7e,0x81,0xd0,0x06,0xcf,0x06,0x00, - 0x00,0xcf,0x86,0xa5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09, - 0x14,0xff,0xf0,0x96,0xb9,0xa0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa1,0x00,0x10,0x09, - 0x14,0xff,0xf0,0x96,0xb9,0xa2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa3,0x00,0xd1,0x12, - 0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa4,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa5,0x00, - 0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa6,0x00,0x14,0xff,0xf0,0x96,0xb9,0xa7,0x00, - 0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xa8,0x00,0x14,0xff,0xf0, - 0x96,0xb9,0xa9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xaa,0x00,0x14,0xff,0xf0, - 0x96,0xb9,0xab,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xac,0x00,0x14, - 0xff,0xf0,0x96,0xb9,0xad,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xae,0x00,0x14, - 0xff,0xf0,0x96,0xb9,0xaf,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x14,0xff, - 0xf0,0x96,0xb9,0xb0,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb1,0x00,0x10,0x09,0x14,0xff, - 0xf0,0x96,0xb9,0xb2,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb3,0x00,0xd1,0x12,0x10,0x09, - 0x14,0xff,0xf0,0x96,0xb9,0xb4,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb5,0x00,0x10,0x09, - 0x14,0xff,0xf0,0x96,0xb9,0xb6,0x00,0x14,0xff,0xf0,0x96,0xb9,0xb7,0x00,0xd2,0x24, - 
0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xb8,0x00,0x14,0xff,0xf0,0x96,0xb9, - 0xb9,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xba,0x00,0x14,0xff,0xf0,0x96,0xb9, - 0xbb,0x00,0xd1,0x12,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbc,0x00,0x14,0xff,0xf0, - 0x96,0xb9,0xbd,0x00,0x10,0x09,0x14,0xff,0xf0,0x96,0xb9,0xbe,0x00,0x14,0xff,0xf0, - 0x96,0xb9,0xbf,0x00,0x14,0x00,0xd2,0x14,0xe1,0x8d,0x81,0xe0,0x84,0x81,0xcf,0x86, - 0xe5,0x45,0x81,0xe4,0x02,0x81,0xcf,0x06,0x12,0x00,0xd1,0x0b,0xe0,0xb8,0x82,0xcf, - 0x86,0xcf,0x06,0x00,0x00,0xe0,0xf8,0x8a,0xcf,0x86,0xd5,0x22,0xe4,0x33,0x88,0xe3, - 0xf6,0x87,0xe2,0x9b,0x87,0xe1,0x94,0x87,0xe0,0x8d,0x87,0xcf,0x86,0xe5,0x5e,0x87, - 0xe4,0x45,0x87,0x93,0x07,0x62,0x34,0x87,0x12,0xe6,0x12,0xe6,0xe4,0x99,0x88,0xe3, - 0x92,0x88,0xd2,0x09,0xe1,0x1b,0x88,0xcf,0x06,0x10,0x00,0xe1,0x82,0x88,0xe0,0x4f, - 0x88,0xcf,0x86,0xe5,0x21,0x01,0xd4,0x90,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09, - 0x12,0xff,0xf0,0x9e,0xa4,0xa2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa3,0x00,0x10,0x09, - 0x12,0xff,0xf0,0x9e,0xa4,0xa4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa5,0x00,0xd1,0x12, - 0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa6,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa7,0x00, - 0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xa8,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xa9,0x00, - 0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xaa,0x00,0x12,0xff,0xf0, - 0x9e,0xa4,0xab,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xac,0x00,0x12,0xff,0xf0, - 0x9e,0xa4,0xad,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xae,0x00,0x12, - 0xff,0xf0,0x9e,0xa4,0xaf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xb0,0x00,0x12, - 0xff,0xf0,0x9e,0xa4,0xb1,0x00,0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x12,0xff, - 0xf0,0x9e,0xa4,0xb2,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb3,0x00,0x10,0x09,0x12,0xff, - 0xf0,0x9e,0xa4,0xb4,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb5,0x00,0xd1,0x12,0x10,0x09, - 0x12,0xff,0xf0,0x9e,0xa4,0xb6,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb7,0x00,0x10,0x09, - 0x12,0xff,0xf0,0x9e,0xa4,0xb8,0x00,0x12,0xff,0xf0,0x9e,0xa4,0xb9,0x00,0xd2,0x24, - 0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xba,0x00,0x12,0xff,0xf0,0x9e,0xa4, - 0xbb,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbc,0x00,0x12,0xff,0xf0,0x9e,0xa4, - 0xbd,0x00,0xd1,0x12,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa4,0xbe,0x00,0x12,0xff,0xf0, - 0x9e,0xa4,0xbf,0x00,0x10,0x09,0x12,0xff,0xf0,0x9e,0xa5,0x80,0x00,0x12,0xff,0xf0, - 0x9e,0xa5,0x81,0x00,0x94,0x1e,0x93,0x1a,0x92,0x16,0x91,0x12,0x10,0x09,0x12,0xff, - 0xf0,0x9e,0xa5,0x82,0x00,0x12,0xff,0xf0,0x9e,0xa5,0x83,0x00,0x12,0x00,0x12,0x00, - 0x12,0x00,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - /* nfdi_c0100 */ - 0x57,0x04,0x01,0x00,0xc6,0xe5,0x91,0x13,0xe4,0x27,0x0c,0xe3,0x61,0x07,0xe2,0xda, - 0x01,0xc1,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0xe4,0xd4,0x7c,0xd3,0x3c, - 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x80,0x00,0x01,0xff,0x41,0xcc, - 0x81,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x82,0x00,0x01,0xff,0x41,0xcc,0x83,0x00, - 0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x88,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00, - 0x10,0x04,0x01,0x00,0x01,0xff,0x43,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x01,0xff,0x45,0xcc,0x80,0x00,0x01,0xff,0x45,0xcc,0x81,0x00,0x10,0x08,0x01,0xff, - 0x45,0xcc,0x82,0x00,0x01,0xff,0x45,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x49,0xcc,0x80,0x00,0x01,0xff,0x49,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x49,0xcc, - 
0x82,0x00,0x01,0xff,0x49,0xcc,0x88,0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c,0x10,0x04, - 0x01,0x00,0x01,0xff,0x4e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x80,0x00, - 0x01,0xff,0x4f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x82,0x00, - 0x01,0xff,0x4f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x88,0x00,0x01,0x00, - 0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x55,0xcc,0x80,0x00,0x10,0x08, - 0x01,0xff,0x55,0xcc,0x81,0x00,0x01,0xff,0x55,0xcc,0x82,0x00,0x91,0x10,0x10,0x08, - 0x01,0xff,0x55,0xcc,0x88,0x00,0x01,0xff,0x59,0xcc,0x81,0x00,0x01,0x00,0xd4,0x7c, - 0xd3,0x3c,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x80,0x00,0x01,0xff, - 0x61,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x82,0x00,0x01,0xff,0x61,0xcc, - 0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x61,0xcc,0x88,0x00,0x01,0xff,0x61,0xcc, - 0x8a,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x63,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10, - 0x10,0x08,0x01,0xff,0x65,0xcc,0x80,0x00,0x01,0xff,0x65,0xcc,0x81,0x00,0x10,0x08, - 0x01,0xff,0x65,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08, - 0x01,0xff,0x69,0xcc,0x80,0x00,0x01,0xff,0x69,0xcc,0x81,0x00,0x10,0x08,0x01,0xff, - 0x69,0xcc,0x82,0x00,0x01,0xff,0x69,0xcc,0x88,0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c, - 0x10,0x04,0x01,0x00,0x01,0xff,0x6e,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc, - 0x80,0x00,0x01,0xff,0x6f,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc, - 0x82,0x00,0x01,0xff,0x6f,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00, - 0x01,0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x75,0xcc,0x80,0x00, - 0x10,0x08,0x01,0xff,0x75,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x82,0x00,0xd1,0x10, - 0x10,0x08,0x01,0xff,0x75,0xcc,0x88,0x00,0x01,0xff,0x79,0xcc,0x81,0x00,0x10,0x04, - 0x01,0x00,0x01,0xff,0x79,0xcc,0x88,0x00,0xe1,0x9a,0x03,0xe0,0xd3,0x01,0xcf,0x86, + 0xcc,0x88,0x00,0xd3,0x38,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x6e, + 0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x81, + 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6f,0xcc,0x82,0x00,0x01,0xff,0x6f,0xcc,0x83, + 0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x88,0x00,0x01,0x00,0xd2,0x1c,0xd1,0x0c,0x10, + 0x04,0x01,0x00,0x01,0xff,0x75,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x75,0xcc,0x81, + 0x00,0x01,0xff,0x75,0xcc,0x82,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x75,0xcc,0x88, + 0x00,0x01,0xff,0x79,0xcc,0x81,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x79,0xcc,0x88, + 0x00,0xe1,0x9a,0x03,0xe0,0xd3,0x01,0xcf,0x86,0xd5,0xf4,0xd4,0x80,0xd3,0x40,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x84, + 0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x86,0x00,0x01,0xff,0x61,0xcc,0x86,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa8,0x00,0x01,0xff,0x61,0xcc,0xa8,0x00,0x10, + 0x08,0x01,0xff,0x43,0xcc,0x81,0x00,0x01,0xff,0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1, + 0x10,0x10,0x08,0x01,0xff,0x43,0xcc,0x82,0x00,0x01,0xff,0x63,0xcc,0x82,0x00,0x10, + 0x08,0x01,0xff,0x43,0xcc,0x87,0x00,0x01,0xff,0x63,0xcc,0x87,0x00,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x43,0xcc,0x8c,0x00,0x01,0xff,0x63,0xcc,0x8c,0x00,0x10,0x08,0x01, + 0xff,0x44,0xcc,0x8c,0x00,0x01,0xff,0x64,0xcc,0x8c,0x00,0xd3,0x34,0xd2,0x14,0x51, + 0x04,0x01,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x84,0x00,0x01,0xff,0x65,0xcc,0x84, + 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0x86, + 0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x87,0x00,0x01,0xff,0x65,0xcc,0x87,0x00,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0xa8,0x00,0x01,0xff,0x65,0xcc,0xa8, + 
0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x8c,0x00,0x01,0xff,0x65,0xcc,0x8c,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x82,0x00,0x01,0xff,0x67,0xcc,0x82,0x00,0x10, + 0x08,0x01,0xff,0x47,0xcc,0x86,0x00,0x01,0xff,0x67,0xcc,0x86,0x00,0xd4,0x74,0xd3, + 0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x87,0x00,0x01,0xff,0x67, + 0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x47,0xcc,0xa7,0x00,0x01,0xff,0x67,0xcc,0xa7, + 0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0x82,0x00,0x01,0xff,0x68,0xcc,0x82, + 0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x83,0x00,0x01, + 0xff,0x69,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0x84,0x00,0x01,0xff,0x69, + 0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x86,0x00,0x01,0xff,0x69, + 0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0xa8,0x00,0x01,0xff,0x69,0xcc,0xa8, + 0x00,0xd3,0x30,0xd2,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x49,0xcc,0x87,0x00,0x01, + 0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4a,0xcc,0x82,0x00,0x01,0xff,0x6a, + 0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc,0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7, + 0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x4c,0xcc,0x81,0x00,0x10, + 0x08,0x01,0xff,0x6c,0xcc,0x81,0x00,0x01,0xff,0x4c,0xcc,0xa7,0x00,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x6c,0xcc,0xa7,0x00,0x01,0xff,0x4c,0xcc,0x8c,0x00,0x10,0x08,0x01, + 0xff,0x6c,0xcc,0x8c,0x00,0x01,0x00,0xcf,0x86,0xd5,0xd4,0xd4,0x60,0xd3,0x30,0xd2, + 0x10,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x4e,0xcc,0x81,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x81,0x00,0x01,0xff,0x4e,0xcc,0xa7,0x00,0x10, + 0x08,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x01,0xff,0x4e,0xcc,0x8c,0x00,0xd2,0x10,0x91, + 0x0c,0x10,0x08,0x01,0xff,0x6e,0xcc,0x8c,0x00,0x01,0x00,0x01,0x00,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x4f,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0x84,0x00,0x10,0x08,0x01, + 0xff,0x4f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc,0x86,0x00,0xd3,0x34,0xd2,0x14,0x91, + 0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x8b,0x00,0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01, + 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,0x81,0x00,0x01,0xff,0x72,0xcc,0x81, + 0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa7,0x00,0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c, + 0x00,0x10,0x08,0x01,0xff,0x53,0xcc,0x81,0x00,0x01,0xff,0x73,0xcc,0x81,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x82,0x00,0x01,0xff,0x73,0xcc,0x82,0x00,0x10, + 0x08,0x01,0xff,0x53,0xcc,0xa7,0x00,0x01,0xff,0x73,0xcc,0xa7,0x00,0xd4,0x74,0xd3, + 0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x8c,0x00,0x01,0xff,0x73, + 0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7, + 0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c, + 0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x83,0x00,0x01, + 0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x84,0x00,0x01,0xff,0x75, + 0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x86,0x00,0x01,0xff,0x75, + 0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x8a,0x00,0x01,0xff,0x75,0xcc,0x8a, + 0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x8b,0x00,0x01, + 0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xa8,0x00,0x01,0xff,0x75, + 0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x82,0x00,0x01,0xff,0x77, + 0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x59,0xcc,0x82,0x00,0x01,0xff,0x79,0xcc,0x82, + 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x59,0xcc,0x88,0x00,0x01,0xff,0x5a, + 
0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x81,0x00,0x01,0xff,0x5a,0xcc,0x87, + 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc,0x87,0x00,0x01,0xff,0x5a,0xcc,0x8c, + 0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00,0x01,0x00,0xd0,0x4a,0xcf,0x86,0x55, + 0x04,0x01,0x00,0xd4,0x2c,0xd3,0x18,0x92,0x14,0x91,0x10,0x10,0x08,0x01,0xff,0x4f, + 0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01, + 0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0x55,0xcc,0x9b,0x00,0x93, + 0x14,0x92,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x75,0xcc,0x9b,0x00,0x01,0x00,0x01, + 0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xb4,0xd4,0x24,0x53,0x04,0x01,0x00,0x52, + 0x04,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0x41,0xcc,0x8c,0x00,0x10, + 0x08,0x01,0xff,0x61,0xcc,0x8c,0x00,0x01,0xff,0x49,0xcc,0x8c,0x00,0xd3,0x46,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc,0x8c,0x00,0x01,0xff,0x4f,0xcc,0x8c, + 0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x8c,0x00,0xd1, + 0x12,0x10,0x08,0x01,0xff,0x75,0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x84, + 0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x55,0xcc,0x88, + 0xcc,0x81,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x81, + 0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x8c,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88, + 0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01, + 0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x88, + 0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x88,0xcc,0x84,0x00,0xd4,0x80,0xd3,0x3a,0xd2, + 0x26,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x87,0xcc,0x84,0x00,0x01,0xff,0x61, + 0xcc,0x87,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xc3,0x86,0xcc,0x84,0x00,0x01,0xff, + 0xc3,0xa6,0xcc,0x84,0x00,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0x47,0xcc,0x8c, + 0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b, + 0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0xa8, + 0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa8, + 0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xc6, + 0xb7,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc,0x8c,0x00,0xd3,0x24,0xd2,0x10,0x91, + 0x0c,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00,0x01,0x00,0x01,0x00,0x91,0x10,0x10, + 0x08,0x01,0xff,0x47,0xcc,0x81,0x00,0x01,0xff,0x67,0xcc,0x81,0x00,0x04,0x00,0xd2, + 0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x4e,0xcc,0x80,0x00,0x04,0xff,0x6e,0xcc,0x80, + 0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x8a,0xcc,0x81,0x00,0x01,0xff,0x61,0xcc,0x8a, + 0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xc3,0x86,0xcc,0x81,0x00,0x01,0xff, + 0xc3,0xa6,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xc3,0x98,0xcc,0x81,0x00,0x01,0xff, + 0xc3,0xb8,0xcc,0x81,0x00,0xe2,0x07,0x02,0xe1,0xae,0x01,0xe0,0x93,0x01,0xcf,0x86, 0xd5,0xf4,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc, - 0x84,0x00,0x01,0xff,0x61,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x86,0x00, - 0x01,0xff,0x61,0xcc,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa8,0x00, - 0x01,0xff,0x61,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x43,0xcc,0x81,0x00,0x01,0xff, - 0x63,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x43,0xcc,0x82,0x00, - 0x01,0xff,0x63,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x43,0xcc,0x87,0x00,0x01,0xff, - 0x63,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x43,0xcc,0x8c,0x00,0x01,0xff, - 0x63,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0x8c,0x00,0x01,0xff,0x64,0xcc, - 
0x8c,0x00,0xd3,0x34,0xd2,0x14,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0x45,0xcc, - 0x84,0x00,0x01,0xff,0x65,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc, - 0x86,0x00,0x01,0xff,0x65,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x87,0x00, - 0x01,0xff,0x65,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc, - 0xa8,0x00,0x01,0xff,0x65,0xcc,0xa8,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x8c,0x00, - 0x01,0xff,0x65,0xcc,0x8c,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x82,0x00, - 0x01,0xff,0x67,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x47,0xcc,0x86,0x00,0x01,0xff, - 0x67,0xcc,0x86,0x00,0xd4,0x74,0xd3,0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x47,0xcc,0x87,0x00,0x01,0xff,0x67,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x47,0xcc, - 0xa7,0x00,0x01,0xff,0x67,0xcc,0xa7,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x48,0xcc, - 0x82,0x00,0x01,0xff,0x68,0xcc,0x82,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x01,0xff,0x49,0xcc,0x83,0x00,0x01,0xff,0x69,0xcc,0x83,0x00,0x10,0x08,0x01,0xff, - 0x49,0xcc,0x84,0x00,0x01,0xff,0x69,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x49,0xcc,0x86,0x00,0x01,0xff,0x69,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x49,0xcc, - 0xa8,0x00,0x01,0xff,0x69,0xcc,0xa8,0x00,0xd3,0x30,0xd2,0x10,0x91,0x0c,0x10,0x08, - 0x01,0xff,0x49,0xcc,0x87,0x00,0x01,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x4a,0xcc,0x82,0x00,0x01,0xff,0x6a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc, - 0xa7,0x00,0x01,0xff,0x6b,0xcc,0xa7,0x00,0xd2,0x1c,0xd1,0x0c,0x10,0x04,0x01,0x00, - 0x01,0xff,0x4c,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0x81,0x00,0x01,0xff, - 0x4c,0xcc,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6c,0xcc,0xa7,0x00,0x01,0xff, - 0x4c,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x6c,0xcc,0x8c,0x00,0x01,0x00,0xcf,0x86, - 0xd5,0xd4,0xd4,0x60,0xd3,0x30,0xd2,0x10,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00, - 0x01,0xff,0x4e,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x6e,0xcc,0x81,0x00, - 0x01,0xff,0x4e,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x6e,0xcc,0xa7,0x00,0x01,0xff, - 0x4e,0xcc,0x8c,0x00,0xd2,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x6e,0xcc,0x8c,0x00, - 0x01,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x84,0x00,0x01,0xff, - 0x6f,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x86,0x00,0x01,0xff,0x6f,0xcc, - 0x86,0x00,0xd3,0x34,0xd2,0x14,0x91,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x8b,0x00, - 0x01,0xff,0x6f,0xcc,0x8b,0x00,0x01,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc, - 0x81,0x00,0x01,0xff,0x72,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa7,0x00, - 0x01,0xff,0x72,0xcc,0xa7,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc, - 0x8c,0x00,0x01,0xff,0x72,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x53,0xcc,0x81,0x00, - 0x01,0xff,0x73,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x82,0x00, - 0x01,0xff,0x73,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x53,0xcc,0xa7,0x00,0x01,0xff, - 0x73,0xcc,0xa7,0x00,0xd4,0x74,0xd3,0x34,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x53,0xcc,0x8c,0x00,0x01,0xff,0x73,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x54,0xcc, - 0xa7,0x00,0x01,0xff,0x74,0xcc,0xa7,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x54,0xcc, - 0x8c,0x00,0x01,0xff,0x74,0xcc,0x8c,0x00,0x01,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x01,0xff,0x55,0xcc,0x83,0x00,0x01,0xff,0x75,0xcc,0x83,0x00,0x10,0x08,0x01,0xff, - 0x55,0xcc,0x84,0x00,0x01,0xff,0x75,0xcc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x55,0xcc,0x86,0x00,0x01,0xff,0x75,0xcc,0x86,0x00,0x10,0x08,0x01,0xff,0x55,0xcc, - 0x8a,0x00,0x01,0xff,0x75,0xcc,0x8a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, - 
0x01,0xff,0x55,0xcc,0x8b,0x00,0x01,0xff,0x75,0xcc,0x8b,0x00,0x10,0x08,0x01,0xff, - 0x55,0xcc,0xa8,0x00,0x01,0xff,0x75,0xcc,0xa8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x57,0xcc,0x82,0x00,0x01,0xff,0x77,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x59,0xcc, - 0x82,0x00,0x01,0xff,0x79,0xcc,0x82,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x59,0xcc,0x88,0x00,0x01,0xff,0x5a,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc, - 0x81,0x00,0x01,0xff,0x5a,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x7a,0xcc, - 0x87,0x00,0x01,0xff,0x5a,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x7a,0xcc,0x8c,0x00, - 0x01,0x00,0xd0,0x4a,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x2c,0xd3,0x18,0x92,0x14, - 0x91,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x9b,0x00,0x01,0xff,0x6f,0xcc,0x9b,0x00, + 0x8f,0x00,0x01,0xff,0x61,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x91,0x00, + 0x01,0xff,0x61,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc,0x8f,0x00, + 0x01,0xff,0x65,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x45,0xcc,0x91,0x00,0x01,0xff, + 0x65,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x8f,0x00, + 0x01,0xff,0x69,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0x91,0x00,0x01,0xff, + 0x69,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0x8f,0x00,0x01,0xff, + 0x6f,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc, + 0x91,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc,0x8f,0x00, + 0x01,0xff,0x72,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0x91,0x00,0x01,0xff, + 0x72,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0x8f,0x00,0x01,0xff, + 0x75,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x91,0x00,0x01,0xff,0x75,0xcc, + 0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x04,0xff,0x53,0xcc,0xa6,0x00,0x04,0xff, + 0x73,0xcc,0xa6,0x00,0x10,0x08,0x04,0xff,0x54,0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc, + 0xa6,0x00,0x51,0x04,0x04,0x00,0x10,0x08,0x04,0xff,0x48,0xcc,0x8c,0x00,0x04,0xff, + 0x68,0xcc,0x8c,0x00,0xd4,0x68,0xd3,0x20,0xd2,0x0c,0x91,0x08,0x10,0x04,0x06,0x00, + 0x07,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x08,0x04,0xff,0x41,0xcc,0x87,0x00, + 0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x45,0xcc, + 0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x88,0xcc, + 0x84,0x00,0x04,0xff,0x6f,0xcc,0x88,0xcc,0x84,0x00,0xd1,0x14,0x10,0x0a,0x04,0xff, + 0x4f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x83,0xcc,0x84,0x00,0x10,0x08, + 0x04,0xff,0x4f,0xcc,0x87,0x00,0x04,0xff,0x6f,0xcc,0x87,0x00,0x93,0x30,0xd2,0x24, + 0xd1,0x14,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x87,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc, + 0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x59,0xcc,0x84,0x00,0x04,0xff,0x79,0xcc, + 0x84,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0xcf,0x86, + 0x95,0x14,0x94,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x08,0x00,0x09,0x00,0x09,0x00, + 0x09,0x00,0x01,0x00,0x01,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x18, + 0x53,0x04,0x01,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x04,0x00, + 0x11,0x04,0x04,0x00,0x07,0x00,0x01,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x01,0x00, + 0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00, + 0x04,0x00,0x94,0x18,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x04,0x00, + 0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07,0x00,0x07,0x00,0xe1,0x35,0x01,0xd0, + 0x72,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0xe6,0xd3,0x10,0x52,0x04,0x01,0xe6,0x91, + 0x08,0x10,0x04,0x01,0xe6,0x01,0xe8,0x01,0xdc,0x92,0x0c,0x51,0x04,0x01,0xdc,0x10, + 
0x04,0x01,0xe8,0x01,0xd8,0x01,0xdc,0xd4,0x2c,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10, + 0x04,0x01,0xdc,0x01,0xca,0x10,0x04,0x01,0xca,0x01,0xdc,0x51,0x04,0x01,0xdc,0x10, + 0x04,0x01,0xdc,0x01,0xca,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0xca,0x01,0xdc,0x01, + 0xdc,0x01,0xdc,0xd3,0x08,0x12,0x04,0x01,0xdc,0x01,0x01,0xd2,0x0c,0x91,0x08,0x10, + 0x04,0x01,0x01,0x01,0xdc,0x01,0xdc,0x91,0x08,0x10,0x04,0x01,0xdc,0x01,0xe6,0x01, + 0xe6,0xcf,0x86,0xd5,0x7f,0xd4,0x47,0xd3,0x2e,0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01, + 0xff,0xcc,0x80,0x00,0x01,0xff,0xcc,0x81,0x00,0x10,0x04,0x01,0xe6,0x01,0xff,0xcc, + 0x93,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcc,0x88,0xcc,0x81,0x00,0x01,0xf0,0x10, + 0x04,0x04,0xe6,0x04,0xdc,0xd2,0x08,0x11,0x04,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10, + 0x04,0x04,0xe6,0x04,0xdc,0x10,0x04,0x04,0xdc,0x06,0xff,0x00,0xd3,0x18,0xd2,0x0c, + 0x51,0x04,0x07,0xe6,0x10,0x04,0x07,0xe6,0x07,0xdc,0x51,0x04,0x07,0xdc,0x10,0x04, + 0x07,0xdc,0x07,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x08,0xe8,0x08,0xdc,0x10,0x04, + 0x08,0xdc,0x08,0xe6,0xd1,0x08,0x10,0x04,0x08,0xe9,0x07,0xea,0x10,0x04,0x07,0xea, + 0x07,0xe9,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0xea,0x10,0x04,0x04,0xe9, + 0x06,0xe6,0x06,0xe6,0x06,0xe6,0xd3,0x13,0x52,0x04,0x0a,0x00,0x91,0x0b,0x10,0x07, + 0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10, + 0x04,0x01,0x00,0x09,0x00,0x51,0x04,0x09,0x00,0x10,0x06,0x01,0xff,0x3b,0x00,0x10, + 0x00,0xd0,0xe1,0xcf,0x86,0xd5,0x7a,0xd4,0x5f,0xd3,0x21,0x52,0x04,0x00,0x00,0xd1, + 0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x10,0x09,0x01,0xff, + 0xce,0x91,0xcc,0x81,0x00,0x01,0xff,0xc2,0xb7,0x00,0xd2,0x1f,0xd1,0x12,0x10,0x09, + 0x01,0xff,0xce,0x95,0xcc,0x81,0x00,0x01,0xff,0xce,0x97,0xcc,0x81,0x00,0x10,0x09, + 0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0x00,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xce, + 0x9f,0xcc,0x81,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xa5,0xcc,0x81,0x00,0x01, + 0xff,0xce,0xa9,0xcc,0x81,0x00,0x93,0x17,0x92,0x13,0x91,0x0f,0x10,0x0b,0x01,0xff, + 0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4, + 0x4a,0xd3,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01, + 0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x88,0x00, + 0x01,0xff,0xce,0xa5,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc, + 0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc, + 0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0x93,0x17,0x92,0x13,0x91,0x0f,0x10, + 0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x81,0x00,0x01,0x00,0x01,0x00,0x01,0x00, + 0x01,0x00,0xcf,0x86,0xd5,0x7b,0xd4,0x39,0x53,0x04,0x01,0x00,0xd2,0x16,0x51,0x04, + 0x01,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x88,0x00,0x01,0xff,0xcf,0x85,0xcc, + 0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x01,0xff,0xcf, + 0x85,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x0a,0x00,0xd3, + 0x26,0xd2,0x11,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xcf,0x92,0xcc, + 0x81,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcf,0x92,0xcc,0x88,0x00,0x01,0x00,0x10, + 0x04,0x01,0x00,0x04,0x00,0xd2,0x0c,0x51,0x04,0x06,0x00,0x10,0x04,0x01,0x00,0x04, + 0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd4, + 0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x01,0x00,0x01, + 0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x06, + 0x00,0x07,0x00,0x12,0x04,0x07,0x00,0x08,0x00,0xe3,0x47,0x04,0xe2,0xbe,0x02,0xe1, + 
0x07,0x01,0xd0,0x8b,0xcf,0x86,0xd5,0x6c,0xd4,0x53,0xd3,0x30,0xd2,0x1f,0xd1,0x12, + 0x10,0x09,0x04,0xff,0xd0,0x95,0xcc,0x80,0x00,0x01,0xff,0xd0,0x95,0xcc,0x88,0x00, + 0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x93,0xcc,0x81,0x00,0x51,0x04,0x01,0x00,0x10, + 0x04,0x01,0x00,0x01,0xff,0xd0,0x86,0xcc,0x88,0x00,0x52,0x04,0x01,0x00,0xd1,0x12, + 0x10,0x09,0x01,0xff,0xd0,0x9a,0xcc,0x81,0x00,0x04,0xff,0xd0,0x98,0xcc,0x80,0x00, + 0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x86,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0x92, + 0x11,0x91,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x98,0xcc,0x86,0x00,0x01,0x00, + 0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x92,0x11,0x91,0x0d,0x10,0x04, + 0x01,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5, + 0x57,0x54,0x04,0x01,0x00,0xd3,0x30,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0, + 0xb5,0xcc,0x80,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88,0x00,0x10,0x04,0x01,0x00,0x01, + 0xff,0xd0,0xb3,0xcc,0x81,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff, + 0xd1,0x96,0xcc,0x88,0x00,0x52,0x04,0x01,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0, + 0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8,0xcc,0x80,0x00,0x10,0x09,0x01,0xff,0xd1, + 0x83,0xcc,0x86,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x1a,0x52,0x04,0x01,0x00, + 0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb4,0xcc,0x8f,0x00,0x01,0xff,0xd1, + 0xb5,0xcc,0x8f,0x00,0x01,0x00,0xd0,0x2e,0xcf,0x86,0x95,0x28,0x94,0x24,0xd3,0x18, + 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6,0x51,0x04,0x01,0xe6, + 0x10,0x04,0x01,0xe6,0x0a,0xe6,0x92,0x08,0x11,0x04,0x04,0x00,0x06,0x00,0x04,0x00, + 0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xbe,0xd4,0x4a,0xd3,0x2a,0xd2,0x1a,0xd1,0x0d, + 0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x96,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0, + 0xb6,0xcc,0x86,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04, + 0x06,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04, + 0x06,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00, + 0x09,0x00,0xd3,0x3a,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x90,0xcc,0x86, + 0x00,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x10,0x09,0x01,0xff,0xd0,0x90,0xcc,0x88, + 0x00,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff, + 0xd0,0x95,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x86,0x00,0xd2,0x16,0x51,0x04, + 0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0x98,0xcc,0x88,0x00,0x01,0xff,0xd3,0x99,0xcc, + 0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x96,0xcc,0x88,0x00,0x01,0xff,0xd0, + 0xb6,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0x97,0xcc,0x88,0x00,0x01,0xff,0xd0, + 0xb7,0xcc,0x88,0x00,0xd4,0x74,0xd3,0x3a,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09, + 0x01,0xff,0xd0,0x98,0xcc,0x84,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x84,0x00,0xd1,0x12, + 0x10,0x09,0x01,0xff,0xd0,0x98,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x88,0x00, + 0x10,0x09,0x01,0xff,0xd0,0x9e,0xcc,0x88,0x00,0x01,0xff,0xd0,0xbe,0xcc,0x88,0x00, + 0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0xa8,0xcc,0x88,0x00,0x01, + 0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,0xad,0xcc,0x88, + 0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x84, + 0x00,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0xd3,0x3a,0xd2,0x24,0xd1,0x12,0x10,0x09, + 0x01,0xff,0xd0,0xa3,0xcc,0x88,0x00,0x01,0xff,0xd1,0x83,0xcc,0x88,0x00,0x10,0x09, + 0x01,0xff,0xd0,0xa3,0xcc,0x8b,0x00,0x01,0xff,0xd1,0x83,0xcc,0x8b,0x00,0x91,0x12, + 0x10,0x09,0x01,0xff,0xd0,0xa7,0xcc,0x88,0x00,0x01,0xff,0xd1,0x87,0xcc,0x88,0x00, + 
0x08,0x00,0x92,0x16,0x91,0x12,0x10,0x09,0x01,0xff,0xd0,0xab,0xcc,0x88,0x00,0x01, + 0xff,0xd1,0x8b,0xcc,0x88,0x00,0x09,0x00,0x09,0x00,0xd1,0x74,0xd0,0x36,0xcf,0x86, + 0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00, + 0xd4,0x10,0x93,0x0c,0x52,0x04,0x0a,0x00,0x11,0x04,0x0b,0x00,0x0c,0x00,0x10,0x00, + 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00, + 0x01,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00, + 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04, + 0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0xba, + 0xcf,0x86,0xd5,0x4c,0xd4,0x24,0x53,0x04,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04, + 0x14,0x00,0x01,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00, + 0x10,0x00,0x10,0x04,0x10,0x00,0x0d,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04, + 0x00,0x00,0x02,0xdc,0x02,0xe6,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xdc,0x02,0xe6, + 0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xde,0x02,0xdc,0x02,0xe6,0xd4,0x2c, + 0xd3,0x10,0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x08,0xdc,0x02,0xdc,0x02,0xdc, + 0xd2,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xdc,0x02,0xe6,0xd1,0x08,0x10,0x04, + 0x02,0xe6,0x02,0xde,0x10,0x04,0x02,0xe4,0x02,0xe6,0xd3,0x20,0xd2,0x10,0xd1,0x08, + 0x10,0x04,0x01,0x0a,0x01,0x0b,0x10,0x04,0x01,0x0c,0x01,0x0d,0xd1,0x08,0x10,0x04, + 0x01,0x0e,0x01,0x0f,0x10,0x04,0x01,0x10,0x01,0x11,0xd2,0x10,0xd1,0x08,0x10,0x04, + 0x01,0x12,0x01,0x13,0x10,0x04,0x09,0x13,0x01,0x14,0xd1,0x08,0x10,0x04,0x01,0x15, + 0x01,0x16,0x10,0x04,0x01,0x00,0x01,0x17,0xcf,0x86,0xd5,0x28,0x94,0x24,0x93,0x20, + 0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x18,0x10,0x04,0x01,0x19,0x01,0x00, + 0xd1,0x08,0x10,0x04,0x02,0xe6,0x08,0xdc,0x10,0x04,0x08,0x00,0x08,0x12,0x00,0x00, + 0x01,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04, + 0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x93,0x10, + 0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0xe2,0xfb,0x01,0xe1,0x2b,0x01,0xd0,0xa8,0xcf,0x86,0xd5,0x55,0xd4,0x28,0xd3,0x10, + 0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x10,0x00,0x0a,0x00,0xd2,0x0c, + 0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x08,0x00,0x91,0x08,0x10,0x04,0x01,0x00, + 0x07,0x00,0x07,0x00,0xd3,0x0c,0x52,0x04,0x07,0xe6,0x11,0x04,0x07,0xe6,0x0a,0xe6, + 0xd2,0x10,0xd1,0x08,0x10,0x04,0x0a,0x1e,0x0a,0x1f,0x10,0x04,0x0a,0x20,0x01,0x00, + 0xd1,0x09,0x10,0x05,0x0f,0xff,0x00,0x00,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0xd4, + 0x3d,0x93,0x39,0xd2,0x1a,0xd1,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x10,0x09,0x01, + 0xff,0xd8,0xa7,0xd9,0x93,0x00,0x01,0xff,0xd8,0xa7,0xd9,0x94,0x00,0xd1,0x12,0x10, + 0x09,0x01,0xff,0xd9,0x88,0xd9,0x94,0x00,0x01,0xff,0xd8,0xa7,0xd9,0x95,0x00,0x10, + 0x09,0x01,0xff,0xd9,0x8a,0xd9,0x94,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00, + 0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0a,0x00,0x0a,0x00,0xcf,0x86, + 0xd5,0x5c,0xd4,0x20,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04, + 0x01,0x00,0x01,0x1b,0xd1,0x08,0x10,0x04,0x01,0x1c,0x01,0x1d,0x10,0x04,0x01,0x1e, + 0x01,0x1f,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x20,0x01,0x21,0x10,0x04, + 0x01,0x22,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x10,0x04,0x07,0xdc, + 0x07,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x07,0xe6,0x08,0xe6,0x08,0xe6,0xd1,0x08, + 
0x10,0x04,0x08,0xdc,0x08,0xe6,0x10,0x04,0x08,0xe6,0x0c,0xdc,0xd4,0x10,0x53,0x04, + 0x01,0x00,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x06,0x00,0x93,0x10,0x92,0x0c, + 0x91,0x08,0x10,0x04,0x01,0x23,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x22, + 0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x08, + 0x11,0x04,0x04,0x00,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00, + 0xcf,0x86,0xd5,0x5b,0xd4,0x2e,0xd3,0x1e,0x92,0x1a,0xd1,0x0d,0x10,0x09,0x01,0xff, + 0xdb,0x95,0xd9,0x94,0x00,0x01,0x00,0x10,0x09,0x01,0xff,0xdb,0x81,0xd9,0x94,0x00, 0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00, - 0x01,0xff,0x55,0xcc,0x9b,0x00,0x93,0x14,0x92,0x10,0x91,0x0c,0x10,0x08,0x01,0xff, - 0x75,0xcc,0x9b,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xb4, - 0xd4,0x24,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00, - 0x01,0xff,0x41,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x61,0xcc,0x8c,0x00,0x01,0xff, - 0x49,0xcc,0x8c,0x00,0xd3,0x46,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x69,0xcc, - 0x8c,0x00,0x01,0xff,0x4f,0xcc,0x8c,0x00,0x10,0x08,0x01,0xff,0x6f,0xcc,0x8c,0x00, - 0x01,0xff,0x55,0xcc,0x8c,0x00,0xd1,0x12,0x10,0x08,0x01,0xff,0x75,0xcc,0x8c,0x00, - 0x01,0xff,0x55,0xcc,0x88,0xcc,0x84,0x00,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc, - 0x84,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x81,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a, - 0x01,0xff,0x75,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc,0x8c,0x00, - 0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x8c,0x00,0x01,0xff,0x55,0xcc,0x88,0xcc, - 0x80,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0x75,0xcc,0x88,0xcc,0x80,0x00,0x01,0x00, - 0x10,0x0a,0x01,0xff,0x41,0xcc,0x88,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x88,0xcc, - 0x84,0x00,0xd4,0x80,0xd3,0x3a,0xd2,0x26,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc, - 0x87,0xcc,0x84,0x00,0x01,0xff,0x61,0xcc,0x87,0xcc,0x84,0x00,0x10,0x09,0x01,0xff, - 0xc3,0x86,0xcc,0x84,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x84,0x00,0x51,0x04,0x01,0x00, - 0x10,0x08,0x01,0xff,0x47,0xcc,0x8c,0x00,0x01,0xff,0x67,0xcc,0x8c,0x00,0xd2,0x20, - 0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0x8c,0x00,0x01,0xff,0x6b,0xcc,0x8c,0x00, - 0x10,0x08,0x01,0xff,0x4f,0xcc,0xa8,0x00,0x01,0xff,0x6f,0xcc,0xa8,0x00,0xd1,0x14, - 0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa8,0xcc,0x84,0x00,0x01,0xff,0x6f,0xcc,0xa8,0xcc, - 0x84,0x00,0x10,0x09,0x01,0xff,0xc6,0xb7,0xcc,0x8c,0x00,0x01,0xff,0xca,0x92,0xcc, - 0x8c,0x00,0xd3,0x24,0xd2,0x10,0x91,0x0c,0x10,0x08,0x01,0xff,0x6a,0xcc,0x8c,0x00, - 0x01,0x00,0x01,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x81,0x00,0x01,0xff, - 0x67,0xcc,0x81,0x00,0x04,0x00,0xd2,0x24,0xd1,0x10,0x10,0x08,0x04,0xff,0x4e,0xcc, - 0x80,0x00,0x04,0xff,0x6e,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x8a,0xcc, - 0x81,0x00,0x01,0xff,0x61,0xcc,0x8a,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff, - 0xc3,0x86,0xcc,0x81,0x00,0x01,0xff,0xc3,0xa6,0xcc,0x81,0x00,0x10,0x09,0x01,0xff, - 0xc3,0x98,0xcc,0x81,0x00,0x01,0xff,0xc3,0xb8,0xcc,0x81,0x00,0xe2,0x07,0x02,0xe1, - 0xae,0x01,0xe0,0x93,0x01,0xcf,0x86,0xd5,0xf4,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1, - 0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0x8f,0x00,0x01,0xff,0x61,0xcc,0x8f,0x00,0x10, - 0x08,0x01,0xff,0x41,0xcc,0x91,0x00,0x01,0xff,0x61,0xcc,0x91,0x00,0xd1,0x10,0x10, - 0x08,0x01,0xff,0x45,0xcc,0x8f,0x00,0x01,0xff,0x65,0xcc,0x8f,0x00,0x10,0x08,0x01, - 0xff,0x45,0xcc,0x91,0x00,0x01,0xff,0x65,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10, - 0x08,0x01,0xff,0x49,0xcc,0x8f,0x00,0x01,0xff,0x69,0xcc,0x8f,0x00,0x10,0x08,0x01, - 
0xff,0x49,0xcc,0x91,0x00,0x01,0xff,0x69,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01, - 0xff,0x4f,0xcc,0x8f,0x00,0x01,0xff,0x6f,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x4f, - 0xcc,0x91,0x00,0x01,0xff,0x6f,0xcc,0x91,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10, - 0x08,0x01,0xff,0x52,0xcc,0x8f,0x00,0x01,0xff,0x72,0xcc,0x8f,0x00,0x10,0x08,0x01, - 0xff,0x52,0xcc,0x91,0x00,0x01,0xff,0x72,0xcc,0x91,0x00,0xd1,0x10,0x10,0x08,0x01, - 0xff,0x55,0xcc,0x8f,0x00,0x01,0xff,0x75,0xcc,0x8f,0x00,0x10,0x08,0x01,0xff,0x55, - 0xcc,0x91,0x00,0x01,0xff,0x75,0xcc,0x91,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x04, - 0xff,0x53,0xcc,0xa6,0x00,0x04,0xff,0x73,0xcc,0xa6,0x00,0x10,0x08,0x04,0xff,0x54, - 0xcc,0xa6,0x00,0x04,0xff,0x74,0xcc,0xa6,0x00,0x51,0x04,0x04,0x00,0x10,0x08,0x04, - 0xff,0x48,0xcc,0x8c,0x00,0x04,0xff,0x68,0xcc,0x8c,0x00,0xd4,0x68,0xd3,0x20,0xd2, - 0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x07,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10, - 0x08,0x04,0xff,0x41,0xcc,0x87,0x00,0x04,0xff,0x61,0xcc,0x87,0x00,0xd2,0x24,0xd1, - 0x10,0x10,0x08,0x04,0xff,0x45,0xcc,0xa7,0x00,0x04,0xff,0x65,0xcc,0xa7,0x00,0x10, - 0x0a,0x04,0xff,0x4f,0xcc,0x88,0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x88,0xcc,0x84, - 0x00,0xd1,0x14,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x83,0xcc,0x84,0x00,0x04,0xff,0x6f, - 0xcc,0x83,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x4f,0xcc,0x87,0x00,0x04,0xff,0x6f, - 0xcc,0x87,0x00,0x93,0x30,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x04,0xff,0x4f,0xcc,0x87, - 0xcc,0x84,0x00,0x04,0xff,0x6f,0xcc,0x87,0xcc,0x84,0x00,0x10,0x08,0x04,0xff,0x59, - 0xcc,0x84,0x00,0x04,0xff,0x79,0xcc,0x84,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x07, - 0x00,0x08,0x00,0x08,0x00,0xcf,0x86,0x95,0x14,0x94,0x10,0x93,0x0c,0x92,0x08,0x11, - 0x04,0x08,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x01,0x00,0x01,0x00,0xd0,0x22,0xcf, - 0x86,0x55,0x04,0x01,0x00,0x94,0x18,0x53,0x04,0x01,0x00,0xd2,0x0c,0x91,0x08,0x10, - 0x04,0x01,0x00,0x04,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x07,0x00,0x01,0x00,0xcf, - 0x86,0xd5,0x18,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51, - 0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x94,0x18,0x53,0x04,0x01,0x00,0xd2, - 0x08,0x11,0x04,0x01,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07, - 0x00,0x07,0x00,0xe1,0x34,0x01,0xd0,0x72,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0xe6, - 0xd3,0x10,0x52,0x04,0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0xe8,0x01,0xdc, - 0x92,0x0c,0x51,0x04,0x01,0xdc,0x10,0x04,0x01,0xe8,0x01,0xd8,0x01,0xdc,0xd4,0x2c, - 0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0xdc,0x01,0xca,0x10,0x04,0x01,0xca, - 0x01,0xdc,0x51,0x04,0x01,0xdc,0x10,0x04,0x01,0xdc,0x01,0xca,0x92,0x0c,0x91,0x08, - 0x10,0x04,0x01,0xca,0x01,0xdc,0x01,0xdc,0x01,0xdc,0xd3,0x08,0x12,0x04,0x01,0xdc, - 0x01,0x01,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x01,0x01,0xdc,0x01,0xdc,0x91,0x08, - 0x10,0x04,0x01,0xdc,0x01,0xe6,0x01,0xe6,0xcf,0x86,0xd5,0x7e,0xd4,0x46,0xd3,0x2e, - 0xd2,0x19,0xd1,0x0e,0x10,0x07,0x01,0xff,0xcc,0x80,0x00,0x01,0xff,0xcc,0x81,0x00, - 0x10,0x04,0x01,0xe6,0x01,0xff,0xcc,0x93,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcc, - 0x88,0xcc,0x81,0x00,0x01,0xf0,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd2,0x08,0x11,0x04, - 0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x10,0x04,0x04,0xdc, - 0x06,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x07,0xe6,0x10,0x04,0x07,0xe6,0x07,0xdc, - 0x51,0x04,0x07,0xdc,0x10,0x04,0x07,0xdc,0x07,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04, - 0x08,0xe8,0x08,0xdc,0x10,0x04,0x08,0xdc,0x08,0xe6,0xd1,0x08,0x10,0x04,0x08,0xe9, - 0x07,0xea,0x10,0x04,0x07,0xea,0x07,0xe9,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04, - 
0x01,0xea,0x10,0x04,0x04,0xe9,0x06,0xe6,0x06,0xe6,0x06,0xe6,0xd3,0x13,0x52,0x04, - 0x0a,0x00,0x91,0x0b,0x10,0x07,0x01,0xff,0xca,0xb9,0x00,0x01,0x00,0x0a,0x00,0xd2, - 0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x01,0x00,0x09,0x00,0x51,0x04,0x09,0x00,0x10, - 0x06,0x01,0xff,0x3b,0x00,0x10,0x00,0xd0,0xe1,0xcf,0x86,0xd5,0x7a,0xd4,0x5f,0xd3, - 0x21,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xc2,0xa8,0xcc, - 0x81,0x00,0x10,0x09,0x01,0xff,0xce,0x91,0xcc,0x81,0x00,0x01,0xff,0xc2,0xb7,0x00, - 0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x95,0xcc,0x81,0x00,0x01,0xff,0xce, - 0x97,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0x00,0x00,0xd1, - 0x0d,0x10,0x09,0x01,0xff,0xce,0x9f,0xcc,0x81,0x00,0x00,0x00,0x10,0x09,0x01,0xff, - 0xce,0xa5,0xcc,0x81,0x00,0x01,0xff,0xce,0xa9,0xcc,0x81,0x00,0x93,0x17,0x92,0x13, - 0x91,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x01,0x00,0x01, - 0x00,0x01,0x00,0x01,0x00,0xd4,0x4a,0xd3,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10, - 0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01, - 0xff,0xce,0x99,0xcc,0x88,0x00,0x01,0xff,0xce,0xa5,0xcc,0x88,0x00,0xd1,0x12,0x10, - 0x09,0x01,0xff,0xce,0xb1,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x81,0x00,0x10, - 0x09,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0x93, - 0x17,0x92,0x13,0x91,0x0f,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x81,0x00, - 0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x7b,0xd4,0x39,0x53,0x04, - 0x01,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x88, - 0x00,0x01,0xff,0xcf,0x85,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xbf, - 0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xcf,0x89, - 0xcc,0x81,0x00,0x0a,0x00,0xd3,0x26,0xd2,0x11,0x51,0x04,0x01,0x00,0x10,0x04,0x01, - 0x00,0x01,0xff,0xcf,0x92,0xcc,0x81,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xcf,0x92, - 0xcc,0x88,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd2,0x0c,0x51,0x04,0x06, - 0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10, - 0x04,0x01,0x00,0x04,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01, - 0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51, - 0x04,0x05,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0x12,0x04,0x07,0x00,0x08,0x00,0xe3, - 0x47,0x04,0xe2,0xbe,0x02,0xe1,0x07,0x01,0xd0,0x8b,0xcf,0x86,0xd5,0x6c,0xd4,0x53, - 0xd3,0x30,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x04,0xff,0xd0,0x95,0xcc,0x80,0x00,0x01, - 0xff,0xd0,0x95,0xcc,0x88,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x93,0xcc,0x81, - 0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x86,0xcc,0x88,0x00, - 0x52,0x04,0x01,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x9a,0xcc,0x81,0x00,0x04, - 0xff,0xd0,0x98,0xcc,0x80,0x00,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x86,0x00,0x01, - 0x00,0x53,0x04,0x01,0x00,0x92,0x11,0x91,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0, - 0x98,0xcc,0x86,0x00,0x01,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00, - 0x92,0x11,0x91,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0xb8,0xcc,0x86,0x00,0x01, - 0x00,0x01,0x00,0xcf,0x86,0xd5,0x57,0x54,0x04,0x01,0x00,0xd3,0x30,0xd2,0x1f,0xd1, - 0x12,0x10,0x09,0x04,0xff,0xd0,0xb5,0xcc,0x80,0x00,0x01,0xff,0xd0,0xb5,0xcc,0x88, - 0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0xb3,0xcc,0x81,0x00,0x51,0x04,0x01,0x00, - 0x10,0x04,0x01,0x00,0x01,0xff,0xd1,0x96,0xcc,0x88,0x00,0x52,0x04,0x01,0x00,0xd1, - 0x12,0x10,0x09,0x01,0xff,0xd0,0xba,0xcc,0x81,0x00,0x04,0xff,0xd0,0xb8,0xcc,0x80, - 
0x00,0x10,0x09,0x01,0xff,0xd1,0x83,0xcc,0x86,0x00,0x01,0x00,0x54,0x04,0x01,0x00, - 0x93,0x1a,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd1,0xb4, - 0xcc,0x8f,0x00,0x01,0xff,0xd1,0xb5,0xcc,0x8f,0x00,0x01,0x00,0xd0,0x2e,0xcf,0x86, - 0x95,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00, - 0x01,0xe6,0x51,0x04,0x01,0xe6,0x10,0x04,0x01,0xe6,0x0a,0xe6,0x92,0x08,0x11,0x04, - 0x04,0x00,0x06,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0xbe,0xd4,0x4a, - 0xd3,0x2a,0xd2,0x1a,0xd1,0x0d,0x10,0x04,0x01,0x00,0x01,0xff,0xd0,0x96,0xcc,0x86, - 0x00,0x10,0x09,0x01,0xff,0xd0,0xb6,0xcc,0x86,0x00,0x01,0x00,0xd1,0x08,0x10,0x04, - 0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04, - 0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00, - 0x06,0x00,0x10,0x04,0x06,0x00,0x09,0x00,0xd3,0x3a,0xd2,0x24,0xd1,0x12,0x10,0x09, - 0x01,0xff,0xd0,0x90,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb0,0xcc,0x86,0x00,0x10,0x09, - 0x01,0xff,0xd0,0x90,0xcc,0x88,0x00,0x01,0xff,0xd0,0xb0,0xcc,0x88,0x00,0x51,0x04, - 0x01,0x00,0x10,0x09,0x01,0xff,0xd0,0x95,0xcc,0x86,0x00,0x01,0xff,0xd0,0xb5,0xcc, - 0x86,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd3,0x98,0xcc,0x88, - 0x00,0x01,0xff,0xd3,0x99,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x96, - 0xcc,0x88,0x00,0x01,0xff,0xd0,0xb6,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0x97, - 0xcc,0x88,0x00,0x01,0xff,0xd0,0xb7,0xcc,0x88,0x00,0xd4,0x74,0xd3,0x3a,0xd2,0x16, - 0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd0,0x98,0xcc,0x84,0x00,0x01,0xff,0xd0, - 0xb8,0xcc,0x84,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0x98,0xcc,0x88,0x00,0x01, - 0xff,0xd0,0xb8,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0x9e,0xcc,0x88,0x00,0x01, - 0xff,0xd0,0xbe,0xcc,0x88,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff, - 0xd3,0xa8,0xcc,0x88,0x00,0x01,0xff,0xd3,0xa9,0xcc,0x88,0x00,0xd1,0x12,0x10,0x09, - 0x04,0xff,0xd0,0xad,0xcc,0x88,0x00,0x04,0xff,0xd1,0x8d,0xcc,0x88,0x00,0x10,0x09, - 0x01,0xff,0xd0,0xa3,0xcc,0x84,0x00,0x01,0xff,0xd1,0x83,0xcc,0x84,0x00,0xd3,0x3a, - 0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x88,0x00,0x01,0xff,0xd1, - 0x83,0xcc,0x88,0x00,0x10,0x09,0x01,0xff,0xd0,0xa3,0xcc,0x8b,0x00,0x01,0xff,0xd1, - 0x83,0xcc,0x8b,0x00,0x91,0x12,0x10,0x09,0x01,0xff,0xd0,0xa7,0xcc,0x88,0x00,0x01, - 0xff,0xd1,0x87,0xcc,0x88,0x00,0x08,0x00,0x92,0x16,0x91,0x12,0x10,0x09,0x01,0xff, - 0xd0,0xab,0xcc,0x88,0x00,0x01,0xff,0xd1,0x8b,0xcc,0x88,0x00,0x09,0x00,0x09,0x00, - 0xd1,0x74,0xd0,0x36,0xcf,0x86,0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04, - 0x09,0x00,0x0a,0x00,0x0a,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x0a,0x00,0x11,0x04, - 0x0b,0x00,0x0c,0x00,0x10,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00, - 0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x01,0x00, - 0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00, - 0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x14, - 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x01,0x00, - 0x01,0x00,0x01,0x00,0xd0,0xba,0xcf,0x86,0xd5,0x4c,0xd4,0x24,0x53,0x04,0x01,0x00, - 0xd2,0x10,0xd1,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x10,0x04,0x04,0x00,0x00,0x00, - 0xd1,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x04,0x10,0x00,0x0d,0x00,0xd3,0x18, - 0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x02,0xdc,0x02,0xe6,0x51,0x04,0x02,0xe6, - 0x10,0x04,0x02,0xdc,0x02,0xe6,0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xde, - 
0x02,0xdc,0x02,0xe6,0xd4,0x2c,0xd3,0x10,0x92,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04, - 0x08,0xdc,0x02,0xdc,0x02,0xdc,0xd2,0x0c,0x51,0x04,0x02,0xe6,0x10,0x04,0x02,0xdc, - 0x02,0xe6,0xd1,0x08,0x10,0x04,0x02,0xe6,0x02,0xde,0x10,0x04,0x02,0xe4,0x02,0xe6, - 0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x0a,0x01,0x0b,0x10,0x04,0x01,0x0c, - 0x01,0x0d,0xd1,0x08,0x10,0x04,0x01,0x0e,0x01,0x0f,0x10,0x04,0x01,0x10,0x01,0x11, - 0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x12,0x01,0x13,0x10,0x04,0x09,0x13,0x01,0x14, - 0xd1,0x08,0x10,0x04,0x01,0x15,0x01,0x16,0x10,0x04,0x01,0x00,0x01,0x17,0xcf,0x86, - 0xd5,0x28,0x94,0x24,0x93,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x18, - 0x10,0x04,0x01,0x19,0x01,0x00,0xd1,0x08,0x10,0x04,0x02,0xe6,0x08,0xdc,0x10,0x04, - 0x08,0x00,0x08,0x12,0x00,0x00,0x01,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00,0xd2,0x0c, - 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04, - 0x00,0x00,0x14,0x00,0x93,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0xe2,0xfa,0x01,0xe1,0x2a,0x01,0xd0,0xa7,0xcf,0x86, - 0xd5,0x54,0xd4,0x28,0xd3,0x10,0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x0d,0x00, - 0x10,0x00,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x08,0x00, - 0x91,0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x07,0x00,0xd3,0x0c,0x52,0x04,0x07,0xe6, - 0x11,0x04,0x07,0xe6,0x0a,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0a,0x1e,0x0a,0x1f, - 0x10,0x04,0x0a,0x20,0x01,0x00,0xd1,0x08,0x10,0x04,0x0f,0x00,0x00,0x00,0x10,0x04, - 0x08,0x00,0x01,0x00,0xd4,0x3d,0x93,0x39,0xd2,0x1a,0xd1,0x08,0x10,0x04,0x0c,0x00, - 0x01,0x00,0x10,0x09,0x01,0xff,0xd8,0xa7,0xd9,0x93,0x00,0x01,0xff,0xd8,0xa7,0xd9, - 0x94,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd9,0x88,0xd9,0x94,0x00,0x01,0xff,0xd8, - 0xa7,0xd9,0x95,0x00,0x10,0x09,0x01,0xff,0xd9,0x8a,0xd9,0x94,0x00,0x01,0x00,0x01, - 0x00,0x53,0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0a, - 0x00,0x0a,0x00,0xcf,0x86,0xd5,0x5c,0xd4,0x20,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51, - 0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0x1b,0xd1,0x08,0x10,0x04,0x01,0x1c,0x01, - 0x1d,0x10,0x04,0x01,0x1e,0x01,0x1f,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01, - 0x20,0x01,0x21,0x10,0x04,0x01,0x22,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xe6,0x04, - 0xdc,0x10,0x04,0x07,0xdc,0x07,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x07,0xe6,0x08, - 0xe6,0x08,0xe6,0xd1,0x08,0x10,0x04,0x08,0xdc,0x08,0xe6,0x10,0x04,0x08,0xe6,0x0c, - 0xdc,0xd4,0x10,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x06, - 0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x23,0x01,0x00,0x01,0x00,0x01, - 0x00,0x01,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53, - 0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x04,0x00,0x01,0x00,0x51,0x04,0x01,0x00,0x10, - 0x04,0x01,0x00,0x04,0x00,0xcf,0x86,0xd5,0x5b,0xd4,0x2e,0xd3,0x1e,0x92,0x1a,0xd1, - 0x0d,0x10,0x09,0x01,0xff,0xdb,0x95,0xd9,0x94,0x00,0x01,0x00,0x10,0x09,0x01,0xff, - 0xdb,0x81,0xd9,0x94,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01, - 0x00,0x10,0x04,0x01,0x00,0x04,0x00,0xd3,0x19,0xd2,0x11,0x51,0x04,0x01,0x00,0x10, - 0x04,0x01,0x00,0x01,0xff,0xdb,0x92,0xd9,0x94,0x00,0x11,0x04,0x01,0x00,0x01,0xe6, - 0x52,0x04,0x01,0xe6,0xd1,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00, - 0x01,0xe6,0xd4,0x38,0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x01,0xe6,0x10,0x04,0x01,0xe6, - 0x01,0xdc,0xd1,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6, - 0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0xdc,0x01,0xe6, - 
0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0xdc,0x07,0x00,0x53,0x04,0x01,0x00,0xd2,0x08, - 0x11,0x04,0x01,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07,0x00, - 0xd1,0xc8,0xd0,0x76,0xcf,0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,0x04, - 0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x93,0x10,0x92,0x0c, - 0x91,0x08,0x10,0x04,0x04,0x00,0x04,0x24,0x04,0x00,0x04,0x00,0x04,0x00,0xd4,0x14, - 0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x07,0x00, - 0x07,0x00,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x04,0xe6, - 0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd2,0x0c, - 0x51,0x04,0x04,0xdc,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd1,0x08,0x10,0x04,0x04,0xdc, - 0x04,0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xcf,0x86,0xd5,0x3c,0x94,0x38,0xd3,0x1c, - 0xd2,0x0c,0x51,0x04,0x04,0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04, - 0x04,0xdc,0x04,0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04, - 0x04,0xdc,0x04,0xe6,0x10,0x04,0x04,0xe6,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00, - 0x07,0x00,0x07,0x00,0x08,0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x52,0x04,0x08,0x00, - 0x11,0x04,0x08,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x04,0x00, - 0x54,0x04,0x04,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0xd4,0x14,0x53,0x04, - 0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xe6,0x09,0xe6, - 0xd3,0x10,0x92,0x0c,0x51,0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0x00, - 0xd2,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x00,0x00,0x91,0x08,0x10,0x04, - 0x00,0x00,0x14,0xdc,0x14,0x00,0xe4,0x78,0x57,0xe3,0xda,0x3e,0xe2,0x89,0x3e,0xe1, - 0x91,0x2c,0xe0,0x21,0x10,0xcf,0x86,0xc5,0xe4,0x80,0x08,0xe3,0xcb,0x03,0xe2,0x61, - 0x01,0xd1,0x94,0xd0,0x5a,0xcf,0x86,0xd5,0x20,0x54,0x04,0x0b,0x00,0xd3,0x0c,0x52, - 0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0b,0xe6,0x92,0x0c,0x51,0x04,0x0b,0xe6,0x10, - 0x04,0x0b,0x00,0x0b,0xe6,0x0b,0xe6,0xd4,0x24,0xd3,0x10,0x52,0x04,0x0b,0xe6,0x91, - 0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x0b,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b, - 0x00,0x0b,0xe6,0x0b,0xe6,0x11,0x04,0x0b,0xe6,0x00,0x00,0x53,0x04,0x0b,0x00,0x52, - 0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xcf,0x86,0xd5, - 0x20,0x54,0x04,0x0c,0x00,0x53,0x04,0x0c,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0c, - 0x00,0x0c,0xdc,0x0c,0xdc,0x51,0x04,0x00,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x94, - 0x14,0x53,0x04,0x13,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0xd0,0x4a,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4,0x20,0xd3, - 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x00,0x10,0x00,0x0d,0x00,0x0d,0x00,0x52, - 0x04,0x0d,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x10,0x00,0x10,0x00,0xd3,0x18,0xd2, - 0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x11,0x00,0x91,0x08,0x10,0x04,0x11, - 0x00,0x00,0x00,0x12,0x00,0x52,0x04,0x12,0x00,0x11,0x04,0x12,0x00,0x00,0x00,0xcf, - 0x86,0xd5,0x18,0x54,0x04,0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10, - 0x04,0x00,0x00,0x14,0xdc,0x12,0xe6,0x12,0xe6,0xd4,0x30,0xd3,0x18,0xd2,0x0c,0x51, - 0x04,0x12,0xe6,0x10,0x04,0x12,0x00,0x11,0xdc,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d, - 0xdc,0x0d,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x0d,0xe6,0x91, - 0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x0d,0xdc,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10, - 0x04,0x0d,0x1b,0x0d,0x1c,0x10,0x04,0x0d,0x1d,0x0d,0xe6,0x51,0x04,0x0d,0xe6,0x10, - 
0x04,0x0d,0xdc,0x0d,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x10, - 0x04,0x0d,0xdc,0x0d,0xe6,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xe6,0x10,0xe6,0xe1, - 0x3a,0x01,0xd0,0x77,0xcf,0x86,0xd5,0x20,0x94,0x1c,0x93,0x18,0xd2,0x0c,0x91,0x08, - 0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x07,0x00,0x01,0x00, - 0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x1b,0x53,0x04,0x01,0x00,0x92,0x13,0x91,0x0f, - 0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa4,0xa8,0xe0,0xa4,0xbc,0x00,0x01,0x00,0x01, - 0x00,0xd3,0x26,0xd2,0x13,0x91,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa4,0xb0, - 0xe0,0xa4,0xbc,0x00,0x01,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xa4,0xb3,0xe0, - 0xa4,0xbc,0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x91, - 0x08,0x10,0x04,0x01,0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x8c,0xd4,0x18,0x53, - 0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x10, - 0x04,0x0b,0x00,0x0c,0x00,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01, - 0xe6,0x10,0x04,0x01,0xdc,0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x0b,0x00,0x0c, - 0x00,0xd2,0x2c,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xa4,0x95,0xe0,0xa4,0xbc,0x00, - 0x01,0xff,0xe0,0xa4,0x96,0xe0,0xa4,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa4,0x97, - 0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0x9c,0xe0,0xa4,0xbc,0x00,0xd1,0x16,0x10, - 0x0b,0x01,0xff,0xe0,0xa4,0xa1,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0xa2,0xe0, - 0xa4,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa4,0xab,0xe0,0xa4,0xbc,0x00,0x01,0xff, - 0xe0,0xa4,0xaf,0xe0,0xa4,0xbc,0x00,0x54,0x04,0x01,0x00,0xd3,0x14,0x92,0x10,0xd1, - 0x08,0x10,0x04,0x01,0x00,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0c,0x00,0x0c,0x00,0xd2, - 0x10,0xd1,0x08,0x10,0x04,0x10,0x00,0x0b,0x00,0x10,0x04,0x0b,0x00,0x09,0x00,0x91, - 0x08,0x10,0x04,0x09,0x00,0x08,0x00,0x09,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x44,0xd4, - 0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91, - 0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10, - 0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1, - 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01, - 0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00, - 0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00, - 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x08,0x11, - 0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x07,0x00,0x01,0x00,0xcf, - 0x86,0xd5,0x7b,0xd4,0x42,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01, - 0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x17,0xd1,0x08,0x10,0x04,0x01, - 0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xe0,0xa7,0x87,0xe0,0xa6,0xbe,0x00, - 0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xa7,0x87,0xe0,0xa7,0x97,0x00,0x01,0x09,0x10, - 0x04,0x08,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10, - 0x04,0x00,0x00,0x01,0x00,0x52,0x04,0x00,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0, - 0xa6,0xa1,0xe0,0xa6,0xbc,0x00,0x01,0xff,0xe0,0xa6,0xa2,0xe0,0xa6,0xbc,0x00,0x10, - 0x04,0x00,0x00,0x01,0xff,0xe0,0xa6,0xaf,0xe0,0xa6,0xbc,0x00,0xd4,0x10,0x93,0x0c, - 0x52,0x04,0x01,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00, - 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x51,0x04,0x13,0x00, - 0x10,0x04,0x14,0xe6,0x00,0x00,0xe2,0x48,0x02,0xe1,0x4f,0x01,0xd0,0xa4,0xcf,0x86, - 0xd5,0x4c,0xd4,0x34,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x07,0x00, - 
0x10,0x04,0x01,0x00,0x07,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00, - 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00, + 0x04,0x00,0xd3,0x19,0xd2,0x11,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff, + 0xdb,0x92,0xd9,0x94,0x00,0x11,0x04,0x01,0x00,0x01,0xe6,0x52,0x04,0x01,0xe6,0xd1, + 0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6,0xd4,0x38,0xd3, + 0x1c,0xd2,0x0c,0x51,0x04,0x01,0xe6,0x10,0x04,0x01,0xe6,0x01,0xdc,0xd1,0x08,0x10, + 0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xe6,0xd2,0x10,0xd1,0x08,0x10, + 0x04,0x01,0xe6,0x01,0x00,0x10,0x04,0x01,0xdc,0x01,0xe6,0x91,0x08,0x10,0x04,0x01, + 0xe6,0x01,0xdc,0x07,0x00,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x04, + 0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x07,0x00,0xd1,0xc8,0xd0,0x76,0xcf, + 0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04, + 0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04, + 0x00,0x04,0x24,0x04,0x00,0x04,0x00,0x04,0x00,0xd4,0x14,0x53,0x04,0x04,0x00,0x52, + 0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x07,0x00,0x07,0x00,0xd3,0x1c,0xd2, + 0x0c,0x91,0x08,0x10,0x04,0x04,0xe6,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04, + 0xdc,0x04,0xe6,0x10,0x04,0x04,0xe6,0x04,0xdc,0xd2,0x0c,0x51,0x04,0x04,0xdc,0x10, + 0x04,0x04,0xe6,0x04,0xdc,0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10,0x04,0x04, + 0xdc,0x04,0xe6,0xcf,0x86,0xd5,0x3c,0x94,0x38,0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x04, + 0xe6,0x10,0x04,0x04,0xdc,0x04,0xe6,0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10, + 0x04,0x04,0xdc,0x04,0xe6,0xd2,0x10,0xd1,0x08,0x10,0x04,0x04,0xdc,0x04,0xe6,0x10, + 0x04,0x04,0xe6,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x08, + 0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x52,0x04,0x08,0x00,0x11,0x04,0x08,0x00,0x0a, + 0x00,0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93, + 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0xd4,0x14,0x53,0x04,0x09,0x00,0x92,0x0c,0x51, + 0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xe6,0x09,0xe6,0xd3,0x10,0x92,0x0c,0x51, + 0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09, + 0x00,0x10,0x04,0x09,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x14,0xdc,0x14, + 0x00,0xe4,0xf8,0x57,0xe3,0x45,0x3f,0xe2,0xf4,0x3e,0xe1,0xc7,0x2c,0xe0,0x21,0x10, + 0xcf,0x86,0xc5,0xe4,0x80,0x08,0xe3,0xcb,0x03,0xe2,0x61,0x01,0xd1,0x94,0xd0,0x5a, + 0xcf,0x86,0xd5,0x20,0x54,0x04,0x0b,0x00,0xd3,0x0c,0x52,0x04,0x0b,0x00,0x11,0x04, + 0x0b,0x00,0x0b,0xe6,0x92,0x0c,0x51,0x04,0x0b,0xe6,0x10,0x04,0x0b,0x00,0x0b,0xe6, + 0x0b,0xe6,0xd4,0x24,0xd3,0x10,0x52,0x04,0x0b,0xe6,0x91,0x08,0x10,0x04,0x0b,0x00, + 0x0b,0xe6,0x0b,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x0b,0xe6, + 0x11,0x04,0x0b,0xe6,0x00,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04, + 0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xcf,0x86,0xd5,0x20,0x54,0x04,0x0c,0x00, + 0x53,0x04,0x0c,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x0c,0xdc,0x0c,0xdc, + 0x51,0x04,0x00,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x13,0x00, + 0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0xd0,0x4a,0xcf,0x86,0x55,0x04,0x00,0x00,0xd4,0x20,0xd3,0x10,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x0d,0x00,0x10,0x00,0x0d,0x00,0x0d,0x00,0x52,0x04,0x0d,0x00,0x91,0x08, + 0x10,0x04,0x0d,0x00,0x10,0x00,0x10,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x10,0x00, + 
0x10,0x04,0x10,0x00,0x11,0x00,0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x12,0x00, + 0x52,0x04,0x12,0x00,0x11,0x04,0x12,0x00,0x00,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04, + 0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x14,0xdc, + 0x12,0xe6,0x12,0xe6,0xd4,0x30,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x12,0xe6,0x10,0x04, + 0x12,0x00,0x11,0xdc,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xdc,0x0d,0xe6,0xd2,0x0c, + 0x91,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x0d,0xe6,0x91,0x08,0x10,0x04,0x0d,0xe6, + 0x0d,0xdc,0x0d,0xdc,0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0d,0x1b,0x0d,0x1c, + 0x10,0x04,0x0d,0x1d,0x0d,0xe6,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xdc,0x0d,0xe6, + 0xd2,0x10,0xd1,0x08,0x10,0x04,0x0d,0xe6,0x0d,0xdc,0x10,0x04,0x0d,0xdc,0x0d,0xe6, + 0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xe6,0x10,0xe6,0xe1,0x3a,0x01,0xd0,0x77,0xcf, + 0x86,0xd5,0x20,0x94,0x1c,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x01, + 0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x07,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01, + 0x00,0xd4,0x1b,0x53,0x04,0x01,0x00,0x92,0x13,0x91,0x0f,0x10,0x04,0x01,0x00,0x01, + 0xff,0xe0,0xa4,0xa8,0xe0,0xa4,0xbc,0x00,0x01,0x00,0x01,0x00,0xd3,0x26,0xd2,0x13, + 0x91,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa4,0xb0,0xe0,0xa4,0xbc,0x00,0x01, + 0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xa4,0xb3,0xe0,0xa4,0xbc,0x00,0x01,0x00, + 0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x91,0x08,0x10,0x04,0x01,0x07, + 0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x8c,0xd4,0x18,0x53,0x04,0x01,0x00,0x52,0x04, + 0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x10,0x04,0x0b,0x00,0x0c,0x00, + 0xd3,0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x01,0xe6,0x10,0x04,0x01,0xdc, + 0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x0b,0x00,0x0c,0x00,0xd2,0x2c,0xd1,0x16, + 0x10,0x0b,0x01,0xff,0xe0,0xa4,0x95,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0x96, + 0xe0,0xa4,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa4,0x97,0xe0,0xa4,0xbc,0x00,0x01, + 0xff,0xe0,0xa4,0x9c,0xe0,0xa4,0xbc,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xa4, + 0xa1,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0xa2,0xe0,0xa4,0xbc,0x00,0x10,0x0b, + 0x01,0xff,0xe0,0xa4,0xab,0xe0,0xa4,0xbc,0x00,0x01,0xff,0xe0,0xa4,0xaf,0xe0,0xa4, + 0xbc,0x00,0x54,0x04,0x01,0x00,0xd3,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00, + 0x0a,0x00,0x10,0x04,0x0a,0x00,0x0c,0x00,0x0c,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04, + 0x10,0x00,0x0b,0x00,0x10,0x04,0x0b,0x00,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00, + 0x08,0x00,0x09,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c, + 0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00, + 0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00, 0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00, 0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04, 0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00, - 0xd3,0x2e,0xd2,0x17,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00, - 0x01,0xff,0xe0,0xa8,0xb2,0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01, - 0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0xb8,0xe0,0xa8,0xbc,0x00,0x00,0x00,0xd2,0x08, - 0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x00,0x00,0x01,0x00, - 0xcf,0x86,0xd5,0x80,0xd4,0x34,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04, - 0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x10, - 0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08, - 
0x10,0x04,0x01,0x00,0x01,0x09,0x00,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04, - 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,0xd2,0x25,0xd1,0x0f,0x10,0x04,0x00,0x00, - 0x01,0xff,0xe0,0xa8,0x96,0xe0,0xa8,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0x97, - 0xe0,0xa8,0xbc,0x00,0x01,0xff,0xe0,0xa8,0x9c,0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10, - 0x04,0x01,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0xab,0xe0,0xa8,0xbc,0x00, - 0x00,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00, - 0x01,0x00,0x93,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a,0x00, - 0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0xd0,0x82,0xcf,0x86,0xd5,0x40,0xd4,0x2c, - 0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x91,0x08, - 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04, - 0x07,0x00,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04, - 0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04, - 0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00, - 0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08, - 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00, - 0x91,0x08,0x10,0x04,0x01,0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x3c,0xd4,0x28, - 0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00, - 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04, - 0x01,0x00,0x01,0x09,0x00,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd4,0x18,0x93,0x14,0xd2,0x0c,0x91,0x08, - 0x10,0x04,0x01,0x00,0x07,0x00,0x07,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00, - 0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x00,0x07,0x00,0x00,0x00,0x00,0x00, - 0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x11,0x00,0x13,0x00,0x13,0x00,0xe1,0x24, - 0x01,0xd0,0x86,0xcf,0x86,0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10, - 0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01, - 0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00, - 0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10, - 0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92, - 0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2, - 0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00, - 0x00,0x07,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10, - 0x04,0x01,0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x73,0xd4,0x45,0xd3,0x14,0x52, - 0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01, - 0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xad,0x87,0xe0,0xad,0x96,0x00, - 0x00,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xe0,0xad,0x87,0xe0,0xac,0xbe,0x00,0x91, - 0x0f,0x10,0x0b,0x01,0xff,0xe0,0xad,0x87,0xe0,0xad,0x97,0x00,0x01,0x09,0x00,0x00, - 0xd3,0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x52,0x04,0x00,0x00, - 0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xac,0xa1,0xe0,0xac,0xbc,0x00,0x01,0xff,0xe0, - 0xac,0xa2,0xe0,0xac,0xbc,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd4,0x14,0x93,0x10, - 0xd2,0x08,0x11,0x04,0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00, - 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x0c,0x00,0x0c,0x00, - 0x00,0x00,0xd0,0xb1,0xcf,0x86,0xd5,0x63,0xd4,0x28,0xd3,0x14,0xd2,0x08,0x11,0x04, - 
0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x0c, + 0xd3,0x18,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00, + 0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00, + 0x91,0x08,0x10,0x04,0x01,0x07,0x07,0x00,0x01,0x00,0xcf,0x86,0xd5,0x7b,0xd4,0x42, + 0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04, + 0x00,0x00,0x01,0x00,0xd2,0x17,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04, + 0x00,0x00,0x01,0xff,0xe0,0xa7,0x87,0xe0,0xa6,0xbe,0x00,0xd1,0x0f,0x10,0x0b,0x01, + 0xff,0xe0,0xa7,0x87,0xe0,0xa7,0x97,0x00,0x01,0x09,0x10,0x04,0x08,0x00,0x00,0x00, + 0xd3,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00, + 0x52,0x04,0x00,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe0,0xa6,0xa1,0xe0,0xa6,0xbc, + 0x00,0x01,0xff,0xe0,0xa6,0xa2,0xe0,0xa6,0xbc,0x00,0x10,0x04,0x00,0x00,0x01,0xff, + 0xe0,0xa6,0xaf,0xe0,0xa6,0xbc,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x01,0x00,0x11, + 0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01, + 0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x14,0xe6,0x00, + 0x00,0xe2,0x48,0x02,0xe1,0x4f,0x01,0xd0,0xa4,0xcf,0x86,0xd5,0x4c,0xd4,0x34,0xd3, + 0x1c,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x10,0x04,0x01,0x00,0x07, + 0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01, + 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01, + 0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00, + 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91, + 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x2e,0xd2,0x17,0xd1, + 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xa8,0xb2, + 0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x0b,0x01,0xff, + 0xe0,0xa8,0xb8,0xe0,0xa8,0xbc,0x00,0x00,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00, + 0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x00,0x00,0x01,0x00,0xcf,0x86,0xd5,0x80,0xd4, + 0x34,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x51, + 0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01, + 0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x01, + 0x09,0x00,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x00, + 0x00,0x00,0x00,0xd2,0x25,0xd1,0x0f,0x10,0x04,0x00,0x00,0x01,0xff,0xe0,0xa8,0x96, + 0xe0,0xa8,0xbc,0x00,0x10,0x0b,0x01,0xff,0xe0,0xa8,0x97,0xe0,0xa8,0xbc,0x00,0x01, + 0xff,0xe0,0xa8,0x9c,0xe0,0xa8,0xbc,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00, + 0x10,0x0b,0x01,0xff,0xe0,0xa8,0xab,0xe0,0xa8,0xbc,0x00,0x00,0x00,0xd4,0x10,0x93, + 0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x14,0x52, + 0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a,0x00,0x10,0x04,0x14,0x00,0x00, + 0x00,0x00,0x00,0xd0,0x82,0xcf,0x86,0xd5,0x40,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91, + 0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01, + 0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x07,0x00,0x01,0x00,0x10, + 0x04,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x00, + 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91, + 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2,0x0c,0x91, + 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01, + 
0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01, + 0x07,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x10,0x52,0x04,0x01, + 0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01, + 0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x00, + 0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0xd4,0x18,0x93,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x07, + 0x00,0x07,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x92,0x0c,0x91, + 0x08,0x10,0x04,0x0d,0x00,0x07,0x00,0x00,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10, + 0x04,0x00,0x00,0x11,0x00,0x13,0x00,0x13,0x00,0xe1,0x24,0x01,0xd0,0x86,0xcf,0x86, + 0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00, + 0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00, + 0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x93,0x14, + 0x92,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00, + 0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04, + 0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04, + 0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x01,0x00, + 0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x07,0x01,0x00, + 0x01,0x00,0xcf,0x86,0xd5,0x73,0xd4,0x45,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08, + 0x10,0x04,0x0a,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x0f, + 0x10,0x0b,0x01,0xff,0xe0,0xad,0x87,0xe0,0xad,0x96,0x00,0x00,0x00,0x10,0x04,0x00, + 0x00,0x01,0xff,0xe0,0xad,0x87,0xe0,0xac,0xbe,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff, + 0xe0,0xad,0x87,0xe0,0xad,0x97,0x00,0x01,0x09,0x00,0x00,0xd3,0x0c,0x52,0x04,0x00, + 0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x52,0x04,0x00,0x00,0xd1,0x16,0x10,0x0b,0x01, + 0xff,0xe0,0xac,0xa1,0xe0,0xac,0xbc,0x00,0x01,0xff,0xe0,0xac,0xa2,0xe0,0xac,0xbc, + 0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04,0x01, + 0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91, + 0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x0c,0x00,0x0c,0x00,0x00,0x00,0xd0,0xb1,0xcf, + 0x86,0xd5,0x63,0xd4,0x28,0xd3,0x14,0xd2,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x91, + 0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10, + 0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd3,0x1f,0xd2,0x0c,0x91, + 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0, + 0xae,0x92,0xe0,0xaf,0x97,0x00,0x01,0x00,0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04, + 0x00,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x01,0x00, + 0x00,0x00,0x01,0x00,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10,0x04, + 0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd2,0x0c, 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00, - 0xd3,0x1f,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x91,0x0f, - 0x10,0x0b,0x01,0xff,0xe0,0xae,0x92,0xe0,0xaf,0x97,0x00,0x01,0x00,0x00,0x00,0xd2, - 0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x91, - 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x51, - 0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00, - 0x00,0x00,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11, - 
0x04,0x00,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10, - 0x04,0x08,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00, - 0x00,0x01,0x00,0xcf,0x86,0xd5,0x61,0xd4,0x45,0xd3,0x14,0xd2,0x0c,0x51,0x04,0x01, - 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1, - 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xaf,0x86,0xe0,0xae, - 0xbe,0x00,0x01,0xff,0xe0,0xaf,0x87,0xe0,0xae,0xbe,0x00,0x91,0x0f,0x10,0x0b,0x01, - 0xff,0xe0,0xaf,0x86,0xe0,0xaf,0x97,0x00,0x01,0x09,0x00,0x00,0x93,0x18,0xd2,0x0c, - 0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04, - 0x00,0x00,0x01,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04, - 0x00,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04, - 0x01,0x00,0x10,0x04,0x01,0x00,0x07,0x00,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00, - 0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xe3,0x1c,0x04,0xe2,0x1a,0x02,0xd1,0xf3, - 0xd0,0x76,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04, - 0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00, - 0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10, - 0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00, - 0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00, - 0x01,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x10,0x00, - 0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04, - 0x00,0x00,0x0a,0x00,0x01,0x00,0xcf,0x86,0xd5,0x53,0xd4,0x2f,0xd3,0x10,0x52,0x04, - 0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0xd2,0x13,0x91,0x0f, - 0x10,0x0b,0x01,0xff,0xe0,0xb1,0x86,0xe0,0xb1,0x96,0x00,0x00,0x00,0x01,0x00,0x91, - 0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1, - 0x08,0x10,0x04,0x00,0x00,0x01,0x54,0x10,0x04,0x01,0x5b,0x00,0x00,0x92,0x0c,0x51, - 0x04,0x0a,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2, - 0x08,0x11,0x04,0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93, - 0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0a, - 0x00,0xd0,0x76,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10, - 0x04,0x12,0x00,0x10,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01, - 0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93, - 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01, - 0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00, - 0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x00, - 0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10, - 0x04,0x07,0x07,0x07,0x00,0x01,0x00,0xcf,0x86,0xd5,0x82,0xd4,0x5e,0xd3,0x2a,0xd2, - 0x13,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xb2,0xbf,0xe0,0xb3,0x95,0x00,0x01,0x00, - 0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,0x01,0xff, - 0xe0,0xb3,0x86,0xe0,0xb3,0x95,0x00,0xd2,0x28,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0, - 0xb3,0x86,0xe0,0xb3,0x96,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xb3,0x86,0xe0, - 0xb3,0x82,0x00,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3,0x82,0xe0,0xb3,0x95,0x00,0x91, - 0x08,0x10,0x04,0x01,0x00,0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1, - 0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x00, - 
0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2, - 0x08,0x11,0x04,0x01,0x00,0x09,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93, - 0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0xe1,0x06,0x01,0xd0,0x6e,0xcf,0x86,0xd5,0x3c,0xd4,0x28, - 0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x13,0x00,0x10,0x00,0x01,0x00,0x91,0x08, - 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04, - 0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00, - 0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c, - 0x91,0x08,0x10,0x04,0x01,0x00,0x0c,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00, - 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x0c,0x00,0x13,0x09,0x91,0x08,0x10,0x04, - 0x13,0x09,0x0a,0x00,0x01,0x00,0xcf,0x86,0xd5,0x65,0xd4,0x45,0xd3,0x10,0x52,0x04, - 0x01,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x08, - 0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xb5,0x86,0xe0,0xb4,0xbe, - 0x00,0x01,0xff,0xe0,0xb5,0x87,0xe0,0xb4,0xbe,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff, - 0xe0,0xb5,0x86,0xe0,0xb5,0x97,0x00,0x01,0x09,0x10,0x04,0x0c,0x00,0x12,0x00,0xd3, - 0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x01,0x00,0x52, - 0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x11,0x00,0xd4,0x14,0x93, - 0x10,0xd2,0x08,0x11,0x04,0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01, - 0x00,0xd3,0x0c,0x52,0x04,0x0a,0x00,0x11,0x04,0x0a,0x00,0x12,0x00,0x92,0x0c,0x91, - 0x08,0x10,0x04,0x12,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x5a,0xcf,0x86,0xd5, - 0x34,0xd4,0x18,0x93,0x14,0xd2,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x91,0x08,0x10, - 0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x51, - 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x04, - 0x00,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x04,0x00,0x10, - 0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x00, - 0x00,0x04,0x00,0x00,0x00,0xcf,0x86,0xd5,0x77,0xd4,0x28,0xd3,0x10,0x52,0x04,0x04, - 0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x51,0x04,0x00, - 0x00,0x10,0x04,0x04,0x09,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x04, - 0x00,0xd3,0x14,0x52,0x04,0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x10, - 0x04,0x04,0x00,0x00,0x00,0xd2,0x13,0x51,0x04,0x04,0x00,0x10,0x0b,0x04,0xff,0xe0, - 0xb7,0x99,0xe0,0xb7,0x8a,0x00,0x04,0x00,0xd1,0x19,0x10,0x0b,0x04,0xff,0xe0,0xb7, - 0x99,0xe0,0xb7,0x8f,0x00,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8f,0xe0,0xb7,0x8a, - 0x00,0x10,0x0b,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x9f,0x00,0x04,0x00,0xd4,0x10, - 0x93,0x0c,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x93,0x14, - 0xd2,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0xe2,0x31,0x01,0xd1,0x58,0xd0,0x3a,0xcf,0x86,0xd5,0x18,0x94, - 0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01, - 0x00,0x01,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51, - 0x04,0x01,0x67,0x10,0x04,0x01,0x09,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00, - 0x00,0x01,0x00,0xcf,0x86,0x95,0x18,0xd4,0x0c,0x53,0x04,0x01,0x00,0x12,0x04,0x01, - 0x6b,0x01,0x00,0x53,0x04,0x01,0x00,0x12,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd0, - 0x9e,0xcf,0x86,0xd5,0x54,0xd4,0x3c,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00, - 
0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00, - 0x00,0x10,0x04,0x15,0x00,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x15, - 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,0x15, - 0x00,0xd3,0x08,0x12,0x04,0x15,0x00,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x15, - 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x30,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10, - 0x04,0x15,0x00,0x01,0x00,0x01,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10, - 0x04,0x00,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x15,0x00,0x01,0x00,0x91,0x08,0x10, - 0x04,0x15,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01, - 0x76,0x10,0x04,0x15,0x09,0x01,0x00,0x11,0x04,0x01,0x00,0x00,0x00,0xcf,0x86,0x95, - 0x34,0xd4,0x20,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00, - 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x01,0x7a,0x11,0x04,0x01,0x00,0x00, - 0x00,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x01, - 0x00,0x0d,0x00,0x00,0x00,0xe1,0x2b,0x01,0xd0,0x3e,0xcf,0x86,0xd5,0x14,0x54,0x04, - 0x02,0x00,0x53,0x04,0x02,0x00,0x92,0x08,0x11,0x04,0x02,0xdc,0x02,0x00,0x02,0x00, - 0x54,0x04,0x02,0x00,0xd3,0x14,0x52,0x04,0x02,0x00,0xd1,0x08,0x10,0x04,0x02,0x00, - 0x02,0xdc,0x10,0x04,0x02,0x00,0x02,0xdc,0x92,0x0c,0x91,0x08,0x10,0x04,0x02,0x00, - 0x02,0xd8,0x02,0x00,0x02,0x00,0xcf,0x86,0xd5,0x73,0xd4,0x36,0xd3,0x17,0x92,0x13, - 0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x82,0xe0,0xbe,0xb7, - 0x00,0x02,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x02,0x00,0x02,0x00,0x91, - 0x0f,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x8c,0xe0,0xbe,0xb7,0x00,0x02,0x00, - 0xd3,0x26,0xd2,0x13,0x51,0x04,0x02,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbd,0x91,0xe0, - 0xbe,0xb7,0x00,0x02,0x00,0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0, - 0xbd,0x96,0xe0,0xbe,0xb7,0x00,0x52,0x04,0x02,0x00,0x91,0x0f,0x10,0x0b,0x02,0xff, - 0xe0,0xbd,0x9b,0xe0,0xbe,0xb7,0x00,0x02,0x00,0x02,0x00,0xd4,0x27,0x53,0x04,0x02, - 0x00,0xd2,0x17,0xd1,0x0f,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x80,0xe0,0xbe, - 0xb5,0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00, - 0x00,0x00,0xd3,0x35,0xd2,0x17,0xd1,0x08,0x10,0x04,0x00,0x00,0x02,0x81,0x10,0x04, - 0x02,0x82,0x02,0xff,0xe0,0xbd,0xb1,0xe0,0xbd,0xb2,0x00,0xd1,0x0f,0x10,0x04,0x02, - 0x84,0x02,0xff,0xe0,0xbd,0xb1,0xe0,0xbd,0xb4,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbe, - 0xb2,0xe0,0xbe,0x80,0x00,0x02,0x00,0xd2,0x13,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0, - 0xbe,0xb3,0xe0,0xbe,0x80,0x00,0x02,0x00,0x02,0x82,0x11,0x04,0x02,0x82,0x02,0x00, - 0xd0,0xd3,0xcf,0x86,0xd5,0x65,0xd4,0x27,0xd3,0x1f,0xd2,0x13,0x91,0x0f,0x10,0x04, - 0x02,0x82,0x02,0xff,0xe0,0xbd,0xb1,0xe0,0xbe,0x80,0x00,0x02,0xe6,0x91,0x08,0x10, - 0x04,0x02,0x09,0x02,0x00,0x02,0xe6,0x12,0x04,0x02,0x00,0x0c,0x00,0xd3,0x1f,0xd2, - 0x13,0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0x92,0xe0,0xbe, - 0xb7,0x00,0x51,0x04,0x02,0x00,0x10,0x04,0x04,0x00,0x02,0x00,0xd2,0x0c,0x91,0x08, - 0x10,0x04,0x00,0x00,0x02,0x00,0x02,0x00,0x91,0x0f,0x10,0x04,0x02,0x00,0x02,0xff, - 0xe0,0xbe,0x9c,0xe0,0xbe,0xb7,0x00,0x02,0x00,0xd4,0x3d,0xd3,0x26,0xd2,0x13,0x51, - 0x04,0x02,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xa1,0xe0,0xbe,0xb7,0x00,0x02,0x00, - 0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0xa6,0xe0,0xbe,0xb7, - 0x00,0x52,0x04,0x02,0x00,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xab,0xe0,0xbe, - 0xb7,0x00,0x02,0x00,0x04,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00, - 
0x02,0x00,0x02,0x00,0x02,0x00,0xd2,0x13,0x91,0x0f,0x10,0x04,0x04,0x00,0x02,0xff, - 0xe0,0xbe,0x90,0xe0,0xbe,0xb5,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00, - 0x00,0x04,0x00,0xcf,0x86,0x95,0x4c,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51, - 0x04,0x04,0x00,0x10,0x04,0x04,0xdc,0x04,0x00,0x52,0x04,0x04,0x00,0xd1,0x08,0x10, - 0x04,0x04,0x00,0x00,0x00,0x10,0x04,0x0a,0x00,0x04,0x00,0xd3,0x14,0xd2,0x08,0x11, - 0x04,0x08,0x00,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0x92, - 0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0xcf,0x86,0xe5,0xcc,0x04,0xe4,0x63,0x03,0xe3,0x65,0x01,0xe2,0x04, - 0x01,0xd1,0x7f,0xd0,0x65,0xcf,0x86,0x55,0x04,0x04,0x00,0xd4,0x33,0xd3,0x1f,0xd2, - 0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x0a,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10, - 0x0b,0x04,0xff,0xe1,0x80,0xa5,0xe1,0x80,0xae,0x00,0x04,0x00,0x92,0x10,0xd1,0x08, - 0x10,0x04,0x0a,0x00,0x04,0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x04,0x00,0xd3,0x18, - 0xd2,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x51,0x04,0x0a,0x00, - 0x10,0x04,0x04,0x00,0x04,0x07,0x92,0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0x09, - 0x10,0x04,0x0a,0x09,0x0a,0x00,0x0a,0x00,0xcf,0x86,0x95,0x14,0x54,0x04,0x04,0x00, - 0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00, - 0xd0,0x2e,0xcf,0x86,0x95,0x28,0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00, - 0x91,0x08,0x10,0x04,0x0a,0x00,0x0a,0xdc,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2,0x08, - 0x11,0x04,0x0a,0x00,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0a,0x00,0x01,0x00,0xcf,0x86, - 0xd5,0x24,0x94,0x20,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04, - 0x00,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0d,0x00, - 0x00,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04, - 0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x06,0x00, - 0x08,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0d,0x00, - 0x0d,0x00,0xd1,0x28,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x95,0x1c,0x54,0x04, - 0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x51,0x04, - 0x0b,0x00,0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04, - 0x01,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00, - 0x0b,0x00,0x0b,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04, - 0x01,0x00,0x53,0x04,0x01,0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x0b,0x00, - 0xe2,0x21,0x01,0xd1,0x6c,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x52, - 0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00,0x04, - 0x00,0x04,0x00,0xcf,0x86,0x95,0x48,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51, - 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04, - 0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x04, - 0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10, - 0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xd0, - 0x62,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04, - 0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00, - 0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xd4,0x14,0x53,0x04,0x04, - 0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd3, - 0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04, - 
0x00,0x00,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00, - 0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24,0xd3,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04, - 0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x52,0x04,0x04,0x00,0x51, - 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x93,0x10,0x52,0x04,0x04,0x00,0x51, - 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x94,0x14,0x53,0x04,0x04, - 0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04, - 0x00,0xd1,0x9c,0xd0,0x3e,0xcf,0x86,0x95,0x38,0xd4,0x14,0x53,0x04,0x04,0x00,0x52, - 0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd3,0x14,0xd2, - 0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00, - 0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04, - 0x00,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x93,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04, - 0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00,0x53,0x04,0x04,0x00,0xd2,0x0c,0x51, - 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x0c, - 0xe6,0x10,0x04,0x0c,0xe6,0x08,0xe6,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10, - 0x04,0x08,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x53,0x04,0x04,0x00,0x52, - 0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a,0xcf, - 0x86,0x95,0x14,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08, - 0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04, - 0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x11,0x00,0x00, - 0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x00,0x00,0xd3,0x30,0xd2,0x2a,0xd1, - 0x24,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10, - 0x04,0x0b,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf, - 0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xd2,0x6c,0xd1,0x24,0xd0, - 0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93, - 0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x0b,0x00,0x0b, - 0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x52, - 0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xcf, - 0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10, - 0x04,0x04,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10, - 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x46,0xcf,0x86,0xd5,0x28,0xd4, - 0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x00, - 0x00,0x06,0x00,0x93,0x10,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x09,0x00, - 0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x06,0x00,0x93,0x14,0x52,0x04,0x06,0x00,0xd1, - 0x08,0x10,0x04,0x06,0x09,0x06,0x00,0x10,0x04,0x06,0x00,0x00,0x00,0x00,0x00,0xcf, - 0x86,0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04,0x06,0x00,0x00,0x00,0x00, - 0x00,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06, - 0x00,0x00,0x00,0x06,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x00, - 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0xd5, - 0x24,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x04, - 0x09,0x04,0x00,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x07, - 0xe6,0x00,0x00,0xd4,0x10,0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00,0x00, - 0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x08,0x11,0x04,0x07,0x00,0x00,0x00,0x00, - 
0x00,0xe4,0xac,0x03,0xe3,0x4d,0x01,0xd2,0x84,0xd1,0x48,0xd0,0x2a,0xcf,0x86,0x95, - 0x24,0xd4,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10, - 0x04,0x04,0x00,0x00,0x00,0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00,0x00, - 0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x53, - 0x04,0x04,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x04,0x00,0x94,0x18,0x53,0x04,0x04,0x00,0x92, - 0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0xe4,0x10,0x04,0x0a,0x00,0x00,0x00,0x00, - 0x00,0x0b,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x0c,0x52, - 0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x42,0xcf, - 0x86,0xd5,0x1c,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0xd1, - 0x08,0x10,0x04,0x07,0x00,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0xd4,0x0c,0x53, - 0x04,0x07,0x00,0x12,0x04,0x07,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x10,0xd1, - 0x08,0x10,0x04,0x07,0x00,0x07,0xde,0x10,0x04,0x07,0xe6,0x07,0xdc,0x00,0x00,0xcf, - 0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x00, - 0x00,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0xd4,0x10,0x53,0x04,0x07,0x00,0x52, - 0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x07,0x00,0x91, - 0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a,0xcf,0x86,0x55, - 0x04,0x08,0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00,0x0b, - 0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x95,0x28,0xd4,0x10,0x53,0x04,0x08,0x00,0x92, - 0x08,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51, - 0x04,0x08,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x08,0x00,0x07, - 0x00,0xd2,0xe4,0xd1,0x80,0xd0,0x2e,0xcf,0x86,0x95,0x28,0x54,0x04,0x08,0x00,0xd3, - 0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x08,0xe6,0xd2, - 0x0c,0x91,0x08,0x10,0x04,0x08,0xdc,0x08,0x00,0x08,0x00,0x11,0x04,0x00,0x00,0x08, - 0x00,0x0b,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x52, - 0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xd4,0x14,0x93, - 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x0b, - 0x00,0xd3,0x10,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x0b, - 0xe6,0x52,0x04,0x0b,0xe6,0xd1,0x08,0x10,0x04,0x0b,0xe6,0x00,0x00,0x10,0x04,0x00, - 0x00,0x0b,0xdc,0xd0,0x5e,0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0b,0x00,0x92, - 0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08,0x11, - 0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd4,0x10,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b, - 0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x10,0xe6,0x91,0x08,0x10, - 0x04,0x10,0xe6,0x10,0xdc,0x10,0xdc,0xd2,0x0c,0x51,0x04,0x10,0xdc,0x10,0x04,0x10, - 0xdc,0x10,0xe6,0xd1,0x08,0x10,0x04,0x10,0xe6,0x10,0xdc,0x10,0x04,0x10,0x00,0x00, - 0x00,0xcf,0x06,0x00,0x00,0xe1,0x1e,0x01,0xd0,0xaa,0xcf,0x86,0xd5,0x6e,0xd4,0x53, - 0xd3,0x17,0x52,0x04,0x09,0x00,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac, - 0x85,0xe1,0xac,0xb5,0x00,0x09,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x09,0xff,0xe1, - 0xac,0x87,0xe1,0xac,0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x89,0xe1, - 0xac,0xb5,0x00,0x09,0x00,0xd1,0x0f,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8b,0xe1,0xac, - 0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8d,0xe1,0xac,0xb5,0x00,0x09, - 0x00,0x93,0x17,0x92,0x13,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x91, - 
0xe1,0xac,0xb5,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x54,0x04,0x09,0x00,0xd3,0x10, - 0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x07,0x09,0x00,0x09,0x00,0xd2,0x13, - 0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xba,0xe1,0xac,0xb5, - 0x00,0x91,0x0f,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xbc,0xe1,0xac,0xb5,0x00, - 0x09,0x00,0xcf,0x86,0xd5,0x3d,0x94,0x39,0xd3,0x31,0xd2,0x25,0xd1,0x16,0x10,0x0b, - 0x09,0xff,0xe1,0xac,0xbe,0xe1,0xac,0xb5,0x00,0x09,0xff,0xe1,0xac,0xbf,0xe1,0xac, - 0xb5,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xad,0x82,0xe1,0xac,0xb5,0x00,0x91, - 0x08,0x10,0x04,0x09,0x09,0x09,0x00,0x09,0x00,0x12,0x04,0x09,0x00,0x00,0x00,0x09, - 0x00,0xd4,0x1c,0x53,0x04,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09, - 0x00,0x09,0xe6,0x91,0x08,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0xe6,0xd3,0x08,0x12, - 0x04,0x09,0xe6,0x09,0x00,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x00, - 0x00,0x00,0x00,0xd0,0x2e,0xcf,0x86,0x55,0x04,0x0a,0x00,0xd4,0x18,0x53,0x04,0x0a, - 0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x09,0x0d,0x09,0x11,0x04,0x0d, - 0x00,0x0a,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x0d,0x00,0x0d, - 0x00,0xcf,0x86,0x55,0x04,0x0c,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x0c,0x00,0x51, - 0x04,0x0c,0x00,0x10,0x04,0x0c,0x07,0x0c,0x00,0x0c,0x00,0xd3,0x0c,0x92,0x08,0x11, - 0x04,0x0c,0x00,0x0c,0x09,0x00,0x00,0x12,0x04,0x00,0x00,0x0c,0x00,0xe3,0xb2,0x01, - 0xe2,0x09,0x01,0xd1,0x4c,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x0a,0x00,0x54,0x04,0x0a, - 0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a, - 0x07,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00,0xcf, - 0x86,0x95,0x1c,0x94,0x18,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00,0x00, - 0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0, - 0x3a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08,0x10, - 0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x54,0x04,0x14,0x00,0x53, - 0x04,0x14,0x00,0xd2,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x91, - 0x08,0x10,0x04,0x00,0x00,0x14,0x00,0x14,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x08,0x13, - 0x04,0x0d,0x00,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x0b,0xe6,0x10,0x04,0x0b, - 0xe6,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x01,0x0b,0xdc,0x0b,0xdc,0x92,0x08,0x11, - 0x04,0x0b,0xdc,0x0b,0xe6,0x0b,0xdc,0xd4,0x28,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10, - 0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x01,0x0b,0x01,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b, - 0x01,0x0b,0x00,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xdc,0x0b,0x00,0xd3, - 0x1c,0xd2,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0d,0x00,0xd1,0x08,0x10, - 0x04,0x0d,0xe6,0x0d,0x00,0x10,0x04,0x0d,0x00,0x13,0x00,0x92,0x0c,0x51,0x04,0x10, - 0xe6,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06,0x07, - 0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x94,0x0c,0x53,0x04,0x07,0x00,0x12,0x04,0x07, - 0x00,0x08,0x00,0x08,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00,0xcf,0x86,0xd5,0x40,0xd4, - 0x2c,0xd3,0x10,0x92,0x0c,0x51,0x04,0x08,0xe6,0x10,0x04,0x08,0xdc,0x08,0xe6,0x09, - 0xe6,0xd2,0x0c,0x51,0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x0a,0xe6,0xd1,0x08,0x10, - 0x04,0x0a,0xe6,0x0a,0xea,0x10,0x04,0x0a,0xd6,0x0a,0xdc,0x93,0x10,0x92,0x0c,0x91, - 0x08,0x10,0x04,0x0a,0xca,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0xd4,0x14,0x93, - 0x10,0x52,0x04,0x0a,0xe6,0x51,0x04,0x0a,0xe6,0x10,0x04,0x0a,0xe6,0x10,0xe6,0x10, - 0xe6,0xd3,0x10,0x52,0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x13,0xe8,0x13, - 
0xe4,0xd2,0x10,0xd1,0x08,0x10,0x04,0x13,0xe4,0x13,0xdc,0x10,0x04,0x00,0x00,0x12, - 0xe6,0xd1,0x08,0x10,0x04,0x0c,0xe9,0x0b,0xdc,0x10,0x04,0x09,0xe6,0x09,0xdc,0xe2, - 0x80,0x08,0xe1,0x48,0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84,0xd3, - 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa5,0x00,0x01,0xff,0x61, - 0xcc,0xa5,0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc,0x87, - 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x42,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc,0xa3, - 0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00,0xd2, - 0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x43,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff,0x63, - 0xcc,0xa7,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0x87,0x00,0x01,0xff,0x64, - 0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa3,0x00,0x01,0xff,0x64, - 0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc,0xb1, - 0x00,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa7,0x00,0x01, - 0xff,0x64,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xad,0x00,0x01,0xff,0x64, - 0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc,0x80,0x00,0x01, - 0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc,0x81, - 0x00,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, - 0xff,0x45,0xcc,0xad,0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x45, - 0xcc,0xb0,0x00,0x01,0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45, - 0xcc,0xa7,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08,0x01, - 0xff,0x46,0xcc,0x87,0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40,0xd2, - 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc,0x84, - 0x00,0x10,0x08,0x01,0xff,0x48,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00,0xd1, - 0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00,0x10, - 0x08,0x01,0xff,0x48,0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20,0xd1, - 0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00,0x10, - 0x08,0x01,0xff,0x48,0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10,0x10, - 0x08,0x01,0xff,0x49,0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a,0x01, - 0xff,0x49,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00,0xd3, - 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0x81,0x00,0x01,0xff,0x6b, - 0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc,0xa3, - 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc,0xb1, - 0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00,0xd2, - 0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff,0x6c, - 0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xb1,0x00,0x01,0xff,0x6c, - 0xcc,0xb1,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4c,0xcc,0xad,0x00,0x01,0xff,0x6c, - 0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x4d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc,0x81, - 0x00,0xcf,0x86,0xe5,0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x01,0xff,0x4d,0xcc,0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01,0xff, - 0x4d,0xcc,0xa3,0x00,0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x4e,0xcc,0x87,0x00,0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x4e,0xcc, - 0xa3,0x00,0x01,0xff,0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x4e,0xcc,0xb1,0x00,0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x4e,0xcc, - 
0xad,0x00,0x01,0xff,0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc, - 0x83,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff, - 0x4f,0xcc,0x83,0xcc,0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3,0x48, - 0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x80,0x00,0x01,0xff, - 0x6f,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x81,0x00, - 0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x50,0xcc, - 0x81,0x00,0x01,0xff,0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x50,0xcc,0x87,0x00, - 0x01,0xff,0x70,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52,0xcc, - 0x87,0x00,0x01,0xff,0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa3,0x00, - 0x01,0xff,0x72,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x52,0xcc,0xa3,0xcc, - 0x84,0x00,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x52,0xcc, - 0xb1,0x00,0x01,0xff,0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1,0x10, - 0x10,0x08,0x01,0xff,0x53,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10,0x08, - 0x01,0xff,0x53,0xcc,0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a, - 0x01,0xff,0x53,0xcc,0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87,0x00, - 0x10,0x0a,0x01,0xff,0x53,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c,0xcc, - 0x87,0x00,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x53,0xcc,0xa3,0xcc,0x87,0x00, - 0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0x87,0x00, - 0x01,0xff,0x74,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,0xa3,0x00, - 0x01,0xff,0x74,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0xb1,0x00,0x01,0xff, - 0x74,0xcc,0xb1,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x54,0xcc, - 0xad,0x00,0x01,0xff,0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xa4,0x00, - 0x01,0xff,0x75,0xcc,0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0xb0,0x00, - 0x01,0xff,0x75,0xcc,0xb0,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xad,0x00,0x01,0xff, - 0x75,0xcc,0xad,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x83,0xcc, - 0x81,0x00,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x55,0xcc, - 0x84,0xcc,0x88,0x00,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10,0x08, - 0x01,0xff,0x56,0xcc,0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01,0xff, - 0x56,0xcc,0xa3,0x00,0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x10,0x02,0xcf,0x86,0xd5, - 0xe1,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x80, - 0x00,0x01,0xff,0x77,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x81,0x00,0x01, - 0xff,0x77,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x88,0x00,0x01, - 0xff,0x77,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x87,0x00,0x01,0xff,0x77, - 0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0xa3,0x00,0x01, - 0xff,0x77,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x58,0xcc,0x87,0x00,0x01,0xff,0x78, - 0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x58,0xcc,0x88,0x00,0x01,0xff,0x78, - 0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x59,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc,0x87, - 0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0x82,0x00,0x01, - 0xff,0x7a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x5a,0xcc,0xa3,0x00,0x01,0xff,0x7a, - 0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0xb1,0x00,0x01,0xff,0x7a, - 0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x68,0xcc,0xb1,0x00,0x01,0xff,0x74,0xcc,0x88, - 0x00,0x92,0x1d,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01,0xff,0x79, - 
0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x02,0xff,0xc5,0xbf,0xcc,0x87,0x00,0x0a,0x00, - 0xd4,0x98,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa3,0x00, - 0x01,0xff,0x61,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x89,0x00,0x01,0xff, - 0x61,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x81,0x00, - 0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc, - 0x80,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a, - 0x01,0xff,0x41,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89,0x00, - 0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc, - 0x83,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff, - 0x61,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x81,0x00, - 0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a, - 0x01,0xff,0x41,0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80,0x00, - 0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc, - 0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x83,0x00,0x01,0xff, - 0x61,0xcc,0x86,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x86,0x00, - 0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0x45,0xcc,0xa3,0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x45,0xcc, - 0x89,0x00,0x01,0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45,0xcc, - 0x83,0x00,0x01,0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,0xcc, - 0x81,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0xcf,0x86,0xe5,0x31,0x01,0xd4, - 0x90,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,0xcc,0x80, - 0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82, - 0xcc,0x89,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01, - 0xff,0x45,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00,0x10, - 0x0a,0x01,0xff,0x45,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc,0x82, - 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x89,0x00,0x01,0xff,0x69, - 0xcc,0x89,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc,0xa3, - 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc,0xa3, - 0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00,0xd3, - 0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc,0x81,0x00,0x01, - 0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc,0x80, - 0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f, - 0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a,0x01, - 0xff,0x4f,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00,0xd2, - 0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x6f, - 0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x81,0x00,0x01, - 0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b, - 0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x4f, - 0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x89,0x00,0xd4,0x98,0xd3, - 0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x83,0x00,0x01, - 0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0xa3, - 0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55, - 
0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0x89, - 0x00,0x01,0xff,0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55, - 0xcc,0x9b,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x81,0x00,0x10,0x0a,0x01, - 0xff,0x55,0xcc,0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00,0xd1, - 0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc,0x9b, - 0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff,0x75, - 0xcc,0x9b,0xcc,0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55, - 0xcc,0x9b,0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0xa3,0x00,0x10,0x08,0x01, - 0xff,0x59,0xcc,0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08,0x01, - 0xff,0x59,0xcc,0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x59, - 0xcc,0x89,0x00,0x01,0xff,0x79,0xcc,0x89,0x00,0x92,0x14,0x91,0x10,0x10,0x08,0x01, - 0xff,0x59,0xcc,0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x0a,0x00,0x0a,0x00,0xe1, - 0xc0,0x04,0xe0,0x80,0x02,0xcf,0x86,0xe5,0x2d,0x01,0xd4,0xa8,0xd3,0x54,0xd2,0x28, - 0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1,0xcc, - 0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce, - 0xb1,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93, - 0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff, - 0xce,0xb1,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0x00, - 0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x91,0xcc,0x93,0x00,0x01,0xff,0xce, - 0x91,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80,0x00,0x01, - 0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x91, - 0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b, - 0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcd, - 0x82,0x00,0xd3,0x42,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x93, - 0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93, - 0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b, - 0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc, - 0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x95,0xcc,0x93, - 0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x95,0xcc,0x93, - 0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b, - 0x01,0xff,0xce,0x95,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0xcc, - 0x81,0x00,0x00,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff, - 0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff, - 0xce,0xb7,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0x00, - 0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce, - 0xb7,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x82, - 0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09, - 0x01,0xff,0xce,0x97,0xcc,0x93,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0x00,0x10,0x0b, - 0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc, - 0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81,0x00,0x01, - 0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93, - 0xcd,0x82,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x82,0x00,0xd3,0x54,0xd2,0x28, - 
0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9,0xcc, - 0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce, - 0xb9,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93, - 0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff, - 0xce,0xb9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82,0x00, - 0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x93,0x00,0x01,0xff,0xce, - 0x99,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x99,0xcc,0x93,0xcc,0x80,0x00,0x01, - 0xff,0xce,0x99,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x99, - 0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x99,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b, - 0x01,0xff,0xce,0x99,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x99,0xcc,0x94,0xcd, - 0x82,0x00,0xcf,0x86,0xe5,0x13,0x01,0xd4,0x84,0xd3,0x42,0xd2,0x28,0xd1,0x12,0x10, - 0x09,0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00,0x10, - 0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94, - 0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x81,0x00, - 0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12,0x10, - 0x09,0x01,0xff,0xce,0x9f,0xcc,0x93,0x00,0x01,0xff,0xce,0x9f,0xcc,0x94,0x00,0x10, - 0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc,0x94, - 0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x81,0x00, - 0x01,0xff,0xce,0x9f,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x54,0xd2,0x28,0xd1, - 0x12,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94, - 0x00,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85, - 0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93,0xcc, - 0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf, - 0x85,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00,0xd2, - 0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0x00,0x10,0x04, - 0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x0f,0x10,0x04,0x00, - 0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x81,0x00,0x10,0x04,0x00,0x00,0x01,0xff, - 0xce,0xa5,0xcc,0x94,0xcd,0x82,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12,0x10, - 0x09,0x01,0xff,0xcf,0x89,0xcc,0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00,0x10, - 0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94, - 0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81,0x00, - 0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc, - 0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28,0xd1, - 0x12,0x10,0x09,0x01,0xff,0xce,0xa9,0xcc,0x93,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94, - 0x00,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xa9, - 0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc, - 0x81,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce, - 0xa9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0x00,0xd3, - 0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01,0xff, - 0xce,0xb1,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01,0xff, - 0xce,0xb5,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x80,0x00, - 0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x80,0x00, - 
0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce, - 0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xcf, - 0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x91,0x12,0x10,0x09,0x01, - 0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x00,0x00,0xe0, - 0xe1,0x02,0xcf,0x86,0xe5,0x91,0x01,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10, - 0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94, - 0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00, - 0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01, - 0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94, - 0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0xcd, - 0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1, - 0x16,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0x91, - 0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80,0xcd, - 0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10, - 0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0x91, - 0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd, - 0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd3, - 0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x85,0x00, - 0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc, - 0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xcd,0x85, - 0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00, - 0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce, - 0xb7,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82, - 0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcd, - 0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce, - 0x97,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x80, - 0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81,0xcd, - 0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01, - 0xff,0xce,0x97,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94, - 0xcd,0x82,0xcd,0x85,0x00,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01, - 0xff,0xcf,0x89,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x85, - 0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff, - 0xcf,0x89,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xcf, - 0x89,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81, - 0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00, - 0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16,0x10, - 0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94, - 0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00, - 0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01, - 0xff,0xce,0xa9,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94, - 0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x82,0xcd, - 0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd3,0x49,0xd2, - 
0x26,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce,0xb1, - 0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff, - 0xce,0xb1,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81,0xcd, - 0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcd,0x82,0x00,0x01,0xff,0xce, - 0xb1,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x91, - 0xcc,0x86,0x00,0x01,0xff,0xce,0x91,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0x91, - 0xcc,0x80,0x00,0x01,0xff,0xce,0x91,0xcc,0x81,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff, - 0xce,0x91,0xcd,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xb9,0x00,0x01,0x00, - 0xcf,0x86,0xe5,0x16,0x01,0xd4,0x8f,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10,0x04,0x01, - 0x00,0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x80, - 0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff, - 0xce,0xb7,0xcc,0x81,0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb7,0xcd, - 0x82,0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10, - 0x09,0x01,0xff,0xce,0x95,0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x81,0x00,0x10, - 0x09,0x01,0xff,0xce,0x97,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x81,0x00,0xd1, - 0x13,0x10,0x09,0x01,0xff,0xce,0x97,0xcd,0x85,0x00,0x01,0xff,0xe1,0xbe,0xbf,0xcc, - 0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbe,0xbf,0xcc,0x81,0x00,0x01,0xff,0xe1,0xbe, - 0xbf,0xcd,0x82,0x00,0xd3,0x40,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9, - 0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9, - 0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x51,0x04, - 0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc, - 0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x86, - 0x00,0x01,0xff,0xce,0x99,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x80, - 0x00,0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x04,0x00,0x00,0x01,0xff, - 0xe1,0xbf,0xbe,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbf,0xbe,0xcc,0x81,0x00, - 0x01,0xff,0xe1,0xbf,0xbe,0xcd,0x82,0x00,0xd4,0x93,0xd3,0x4e,0xd2,0x28,0xd1,0x12, - 0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84,0x00, - 0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc, - 0x88,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x93,0x00,0x01, - 0xff,0xcf,0x81,0xcc,0x94,0x00,0x10,0x09,0x01,0xff,0xcf,0x85,0xcd,0x82,0x00,0x01, - 0xff,0xcf,0x85,0xcc,0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff, - 0xce,0xa5,0xcc,0x86,0x00,0x01,0xff,0xce,0xa5,0xcc,0x84,0x00,0x10,0x09,0x01,0xff, - 0xce,0xa5,0xcc,0x80,0x00,0x01,0xff,0xce,0xa5,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09, - 0x01,0xff,0xce,0xa1,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x80,0x00,0x10,0x09, - 0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x01,0xff,0x60,0x00,0xd3,0x3b,0xd2,0x18,0x51, - 0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff, - 0xcf,0x89,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x81,0xcd, - 0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcd,0x82,0x00,0x01,0xff,0xcf, - 0x89,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x9f, - 0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xa9, - 0xcc,0x80,0x00,0x01,0xff,0xce,0xa9,0xcc,0x81,0x00,0xd1,0x10,0x10,0x09,0x01,0xff, - 0xce,0xa9,0xcd,0x85,0x00,0x01,0xff,0xc2,0xb4,0x00,0x10,0x04,0x01,0x00,0x00,0x00, - 
0xe0,0x62,0x0c,0xcf,0x86,0xe5,0x9f,0x08,0xe4,0xf8,0x05,0xe3,0xdb,0x02,0xe2,0xa1, - 0x01,0xd1,0xb4,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x94,0x1c,0x93,0x18,0x92,0x14,0x91, - 0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01,0xff,0xe2,0x80,0x83,0x00,0x01, - 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x14,0x53,0x04,0x01,0x00,0x52,0x04,0x01, - 0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x01,0x00,0xcf,0x86,0xd5, - 0x48,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01, - 0x00,0x06,0x00,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00,0x06,0x00,0xd3,0x1c,0xd2, - 0x0c,0x51,0x04,0x06,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0xd1,0x08,0x10,0x04,0x07, - 0x00,0x08,0x00,0x10,0x04,0x08,0x00,0x06,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08, - 0x00,0x10,0x04,0x08,0x00,0x06,0x00,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x06,0x00,0x91, - 0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x0f,0x00,0x92,0x08,0x11,0x04,0x0f,0x00,0x01, - 0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x00, - 0x00,0x01,0x00,0x01,0x00,0xd0,0x7e,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x53,0x04,0x01, - 0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd3, - 0x10,0x52,0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0c,0x00,0x0c,0x00,0x52, - 0x04,0x0c,0x00,0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0xd4,0x1c,0x53, - 0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x02,0x00,0x91, - 0x08,0x10,0x04,0x03,0x00,0x04,0x00,0x04,0x00,0xd3,0x10,0xd2,0x08,0x11,0x04,0x06, - 0x00,0x08,0x00,0x11,0x04,0x08,0x00,0x0b,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0b, - 0x00,0x0c,0x00,0x10,0x04,0x0e,0x00,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x11, - 0x00,0x13,0x00,0xcf,0x86,0xd5,0x28,0x54,0x04,0x00,0x00,0xd3,0x0c,0x92,0x08,0x11, - 0x04,0x01,0xe6,0x01,0x01,0x01,0xe6,0xd2,0x0c,0x51,0x04,0x01,0x01,0x10,0x04,0x01, - 0x01,0x01,0xe6,0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x01,0x00,0xd4,0x30,0xd3, - 0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x01,0xe6,0x04,0x00,0xd1,0x08,0x10, - 0x04,0x06,0x00,0x06,0x01,0x10,0x04,0x06,0x01,0x06,0xe6,0x92,0x10,0xd1,0x08,0x10, - 0x04,0x06,0xdc,0x06,0xe6,0x10,0x04,0x06,0x01,0x08,0x01,0x09,0xdc,0x93,0x10,0x92, - 0x0c,0x91,0x08,0x10,0x04,0x0a,0xe6,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1, - 0x81,0xd0,0x4f,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01, - 0x00,0x51,0x04,0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xa9,0x00,0x01,0x00,0x92,0x12, - 0x51,0x04,0x01,0x00,0x10,0x06,0x01,0xff,0x4b,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00, - 0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00, - 0x10,0x04,0x04,0x00,0x07,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x06,0x00,0x06,0x00, - 0xcf,0x86,0x95,0x2c,0xd4,0x18,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0xd1,0x08, - 0x10,0x04,0x08,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x93,0x10,0x92,0x0c, - 0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00, - 0xd0,0x68,0xcf,0x86,0xd5,0x48,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00, - 0x10,0x04,0x01,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00, - 0x92,0x0c,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x11,0x00,0x00,0x00,0x53,0x04, - 0x01,0x00,0x92,0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86,0x90,0xcc, - 0xb8,0x00,0x01,0xff,0xe2,0x86,0x92,0xcc,0xb8,0x00,0x01,0x00,0x94,0x1a,0x53,0x04, - 0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86, - 0x94,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x2e,0x94,0x2a,0x53,0x04, - 
0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x87, - 0x90,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2,0x87,0x94,0xcc,0xb8,0x00,0x01,0xff, - 0xe2,0x87,0x92,0xcc,0xb8,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c, - 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x04,0x00,0x93,0x08,0x12,0x04, - 0x04,0x00,0x06,0x00,0x06,0x00,0xe2,0x38,0x02,0xe1,0x3f,0x01,0xd0,0x68,0xcf,0x86, - 0xd5,0x3e,0x94,0x3a,0xd3,0x16,0x52,0x04,0x01,0x00,0x91,0x0e,0x10,0x0a,0x01,0xff, - 0xe2,0x88,0x83,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xd2,0x12,0x91,0x0e,0x10,0x04, - 0x01,0x00,0x01,0xff,0xe2,0x88,0x88,0xcc,0xb8,0x00,0x01,0x00,0x91,0x0e,0x10,0x0a, - 0x01,0xff,0xe2,0x88,0x8b,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x24, - 0x93,0x20,0x52,0x04,0x01,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa3,0xcc, - 0xb8,0x00,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa5,0xcc,0xb8,0x00,0x01,0x00, - 0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x48,0x94,0x44,0xd3,0x2e,0xd2,0x12,0x91,0x0e, - 0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x88,0xbc,0xcc,0xb8,0x00,0x01,0x00,0xd1,0x0e, - 0x10,0x0a,0x01,0xff,0xe2,0x89,0x83,0xcc,0xb8,0x00,0x01,0x00,0x10,0x04,0x01,0x00, - 0x01,0xff,0xe2,0x89,0x85,0xcc,0xb8,0x00,0x92,0x12,0x91,0x0e,0x10,0x04,0x01,0x00, - 0x01,0xff,0xe2,0x89,0x88,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x40, - 0xd3,0x1e,0x92,0x1a,0xd1,0x0c,0x10,0x08,0x01,0xff,0x3d,0xcc,0xb8,0x00,0x01,0x00, - 0x10,0x0a,0x01,0xff,0xe2,0x89,0xa1,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x52,0x04, - 0x01,0x00,0xd1,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x89,0x8d,0xcc,0xb8,0x00, - 0x10,0x08,0x01,0xff,0x3c,0xcc,0xb8,0x00,0x01,0xff,0x3e,0xcc,0xb8,0x00,0xd3,0x30, - 0xd2,0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xa4,0xcc,0xb8,0x00,0x01,0xff, - 0xe2,0x89,0xa5,0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89, - 0xb2,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xb3,0xcc,0xb8,0x00,0x01,0x00,0x92,0x18, - 0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xb6,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89, - 0xb7,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x50,0x94,0x4c, - 0xd3,0x30,0xd2,0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xba,0xcc,0xb8,0x00, - 0x01,0xff,0xe2,0x89,0xbb,0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff, - 0xe2,0x8a,0x82,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x83,0xcc,0xb8,0x00,0x01,0x00, - 0x92,0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0x86,0xcc,0xb8,0x00,0x01,0xff, - 0xe2,0x8a,0x87,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x30,0x53,0x04, - 0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xa2,0xcc, - 0xb8,0x00,0x01,0xff,0xe2,0x8a,0xa8,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a, - 0xa9,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0xab,0xcc,0xb8,0x00,0x01,0x00,0xcf,0x86, - 0x55,0x04,0x01,0x00,0xd4,0x5c,0xd3,0x2c,0x92,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff, - 0xe2,0x89,0xbc,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xbd,0xcc,0xb8,0x00,0x10,0x0a, - 0x01,0xff,0xe2,0x8a,0x91,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x92,0xcc,0xb8,0x00, - 0x01,0x00,0xd2,0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xb2,0xcc, - 0xb8,0x00,0x01,0xff,0xe2,0x8a,0xb3,0xcc,0xb8,0x00,0x91,0x14,0x10,0x0a,0x01,0xff, - 0xe2,0x8a,0xb4,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0xb5,0xcc,0xb8,0x00,0x01,0x00, - 0x93,0x0c,0x92,0x08,0x11,0x04,0x01,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xd1,0x64, - 0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04, - 0x01,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x20,0x53,0x04, - 
0x01,0x00,0x92,0x18,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x80,0x88,0x00, - 0x10,0x08,0x01,0xff,0xe3,0x80,0x89,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86, - 0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04, - 0x01,0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x04,0x00, - 0x04,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00, - 0x92,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x06,0x00,0x06,0x00,0x06,0x00, - 0xcf,0x86,0xd5,0x2c,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04, - 0x06,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04, - 0x07,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x12,0x04,0x08,0x00,0x09,0x00,0xd4,0x14, - 0x53,0x04,0x09,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00,0x0c,0x00, - 0x0c,0x00,0xd3,0x08,0x12,0x04,0x0c,0x00,0x10,0x00,0xd2,0x0c,0x51,0x04,0x10,0x00, - 0x10,0x04,0x10,0x00,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x13,0x00, - 0xd3,0xa6,0xd2,0x74,0xd1,0x40,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x18, - 0x93,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04, - 0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04, - 0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x01,0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00, - 0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00, - 0x06,0x00,0x06,0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04,0x06,0x00, - 0x10,0x04,0x06,0x00,0x07,0x00,0xd1,0x06,0xcf,0x06,0x01,0x00,0xd0,0x1a,0xcf,0x86, - 0x95,0x14,0x54,0x04,0x01,0x00,0x93,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00, - 0x06,0x00,0x06,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00, - 0x13,0x04,0x04,0x00,0x06,0x00,0xd2,0xdc,0xd1,0x48,0xd0,0x26,0xcf,0x86,0x95,0x20, - 0x54,0x04,0x01,0x00,0xd3,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x07,0x00,0x06,0x00, - 0x92,0x0c,0x91,0x08,0x10,0x04,0x08,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00, - 0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04, - 0x04,0x00,0x06,0x00,0x06,0x00,0x52,0x04,0x06,0x00,0x11,0x04,0x06,0x00,0x08,0x00, - 0xd0,0x5e,0xcf,0x86,0xd5,0x2c,0xd4,0x10,0x53,0x04,0x06,0x00,0x92,0x08,0x11,0x04, - 0x06,0x00,0x07,0x00,0x07,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00, - 0x08,0x00,0x52,0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0a,0x00,0x0b,0x00, - 0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0x08,0x00, - 0xd3,0x10,0x92,0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00, - 0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86, - 0xd5,0x1c,0x94,0x18,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00, - 0x51,0x04,0x0b,0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0b,0x00,0x94,0x14,0x93,0x10, - 0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0c,0x00,0x0b,0x00, - 0x0b,0x00,0xd1,0xa8,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c, - 0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00, - 0x0c,0x00,0x01,0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x01,0x00,0x01,0x00, - 0x94,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x01,0x00, - 0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x18,0x53,0x04,0x01,0x00, - 0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x10,0x04,0x0c,0x00, - 
0x01,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00, - 0x51,0x04,0x0c,0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x52,0x04,0x01,0x00,0x51,0x04, - 0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08, - 0x10,0x04,0x0c,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x06,0x00,0x93,0x0c,0x52,0x04, - 0x06,0x00,0x11,0x04,0x06,0x00,0x01,0x00,0x01,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x18, - 0x54,0x04,0x01,0x00,0x93,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00, - 0x0c,0x00,0x0c,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08, - 0x10,0x04,0x0c,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04, - 0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00,0xcf,0x86,0xd5,0x2c,0x94,0x28,0xd3,0x10, - 0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x09,0x00,0xd2,0x0c, - 0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0d,0x00,0x91,0x08,0x10,0x04,0x0a,0x00, - 0x0d,0x00,0x0c,0x00,0x06,0x00,0x94,0x0c,0x53,0x04,0x06,0x00,0x12,0x04,0x06,0x00, - 0x0a,0x00,0x06,0x00,0xe4,0x39,0x01,0xd3,0x0c,0xd2,0x06,0xcf,0x06,0x04,0x00,0xcf, - 0x06,0x06,0x00,0xd2,0x30,0xd1,0x06,0xcf,0x06,0x06,0x00,0xd0,0x06,0xcf,0x06,0x06, - 0x00,0xcf,0x86,0x95,0x1e,0x54,0x04,0x06,0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06, - 0x00,0x91,0x0e,0x10,0x0a,0x06,0xff,0xe2,0xab,0x9d,0xcc,0xb8,0x00,0x06,0x00,0x06, - 0x00,0x06,0x00,0xd1,0x80,0xd0,0x3a,0xcf,0x86,0xd5,0x28,0xd4,0x10,0x53,0x04,0x07, - 0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x08,0x00,0xd3,0x08,0x12,0x04,0x08, - 0x00,0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a, - 0x00,0x94,0x0c,0x93,0x08,0x12,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xcf, - 0x86,0xd5,0x30,0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10, - 0x04,0x0a,0x00,0x10,0x00,0x10,0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08,0x10, - 0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x10,0x00,0x10, - 0x00,0x54,0x04,0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10, - 0x00,0x10,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x14,0x54,0x04,0x10,0x00,0x93,0x0c,0x52, - 0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0x53, - 0x04,0x10,0x00,0xd2,0x08,0x11,0x04,0x10,0x00,0x14,0x00,0x91,0x08,0x10,0x04,0x14, - 0x00,0x10,0x00,0x10,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x10,0x00,0x92, - 0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x15,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x92, - 0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x14,0x00,0x14,0x00,0xd4, - 0x0c,0x53,0x04,0x14,0x00,0x12,0x04,0x14,0x00,0x11,0x00,0x53,0x04,0x14,0x00,0x52, - 0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0xe3,0xb9,0x01, - 0xd2,0xac,0xd1,0x68,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x08,0x00,0x94,0x14,0x53,0x04, - 0x08,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00, - 0x08,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x52,0x04, - 0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0xd4,0x14,0x53,0x04, - 0x09,0x00,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00, - 0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0a,0x00,0x0a,0x00,0x09,0x00, - 0x52,0x04,0x0a,0x00,0x11,0x04,0x0a,0x00,0x0b,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00, - 0xcf,0x86,0x55,0x04,0x08,0x00,0xd4,0x1c,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51,0x04, - 0x08,0x00,0x10,0x04,0x08,0x00,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00, - 0x0b,0xe6,0xd3,0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0d,0x00,0x00,0x00,0x92,0x0c, - 
0x91,0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0xd1,0x6c,0xd0,0x2a, - 0xcf,0x86,0x55,0x04,0x08,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04, - 0x08,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04, - 0x00,0x00,0x0d,0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x55,0x04,0x08,0x00,0xd4,0x1c, - 0xd3,0x0c,0x52,0x04,0x08,0x00,0x11,0x04,0x08,0x00,0x0d,0x00,0x52,0x04,0x00,0x00, - 0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x08,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08, - 0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04, - 0x00,0x00,0x10,0x04,0x00,0x00,0x0c,0x09,0xd0,0x5a,0xcf,0x86,0xd5,0x18,0x54,0x04, - 0x08,0x00,0x93,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00, - 0x00,0x00,0x00,0x00,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00, - 0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04, - 0x08,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04, - 0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00, - 0x00,0x00,0xcf,0x86,0x95,0x40,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04, - 0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00, - 0x10,0x04,0x08,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00, - 0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04, - 0x08,0x00,0x00,0x00,0x0a,0xe6,0xd2,0x9c,0xd1,0x68,0xd0,0x32,0xcf,0x86,0xd5,0x14, - 0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x52,0x04,0x0a,0x00,0x11,0x04,0x08,0x00, - 0x0a,0x00,0x54,0x04,0x0a,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0a,0x00, - 0x0b,0x00,0x0d,0x00,0x0d,0x00,0x12,0x04,0x0d,0x00,0x10,0x00,0xcf,0x86,0x95,0x30, - 0x94,0x2c,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x12,0x00, - 0x91,0x08,0x10,0x04,0x12,0x00,0x13,0x00,0x13,0x00,0xd2,0x08,0x11,0x04,0x13,0x00, - 0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0x00,0x00,0x00,0x00, - 0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c, - 0x51,0x04,0x04,0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86, - 0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93,0x08,0x12,0x04,0x04,0x00,0x00,0x00, - 0x00,0x00,0xd1,0x06,0xcf,0x06,0x04,0x00,0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86, - 0xd5,0x14,0x54,0x04,0x04,0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00, - 0x00,0x00,0x00,0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x04,0x00,0x12,0x04,0x04,0x00, - 0x00,0x00,0xcf,0x86,0xe5,0x8d,0x05,0xe4,0x86,0x05,0xe3,0x7d,0x04,0xe2,0xe4,0x03, - 0xe1,0xc0,0x01,0xd0,0x3e,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x1c,0x53,0x04,0x01, - 0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0xda,0x01,0xe4,0x91,0x08,0x10, - 0x04,0x01,0xe8,0x01,0xde,0x01,0xe0,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x04, - 0x00,0x10,0x04,0x04,0x00,0x06,0x00,0x51,0x04,0x06,0x00,0x10,0x04,0x04,0x00,0x01, - 0x00,0xcf,0x86,0xd5,0xaa,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00, - 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01, - 0xff,0xe3,0x81,0x8b,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81, - 0x8d,0xe3,0x82,0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01, - 0xff,0xe3,0x81,0x8f,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81, - 0x91,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x93, - 0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x95,0xe3,0x82,0x99, - 
0x00,0x01,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x97,0xe3,0x82, - 0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x99,0xe3,0x82,0x99,0x00,0x01, - 0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x9b,0xe3,0x82,0x99,0x00,0x01,0x00, - 0x10,0x0b,0x01,0xff,0xe3,0x81,0x9d,0xe3,0x82,0x99,0x00,0x01,0x00,0xd4,0x53,0xd3, - 0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x9f,0xe3,0x82,0x99,0x00, - 0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xa1,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1, - 0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x81,0xa4,0xe3,0x82,0x99,0x00,0x10,0x04, - 0x01,0x00,0x01,0xff,0xe3,0x81,0xa6,0xe3,0x82,0x99,0x00,0x92,0x13,0x91,0x0f,0x10, - 0x04,0x01,0x00,0x01,0xff,0xe3,0x81,0xa8,0xe3,0x82,0x99,0x00,0x01,0x00,0x01,0x00, - 0xd3,0x4a,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81,0xaf,0xe3,0x82,0x99, - 0x00,0x01,0xff,0xe3,0x81,0xaf,0xe3,0x82,0x9a,0x00,0x10,0x04,0x01,0x00,0x01,0xff, - 0xe3,0x81,0xb2,0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb2, - 0xe3,0x82,0x9a,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb5,0xe3,0x82,0x99, - 0x00,0x01,0xff,0xe3,0x81,0xb5,0xe3,0x82,0x9a,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x04, - 0x01,0x00,0x01,0xff,0xe3,0x81,0xb8,0xe3,0x82,0x99,0x00,0x10,0x0b,0x01,0xff,0xe3, - 0x81,0xb8,0xe3,0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81, - 0xbb,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x81,0xbb,0xe3,0x82,0x9a,0x00,0x01,0x00, - 0xd0,0xee,0xcf,0x86,0xd5,0x42,0x54,0x04,0x01,0x00,0xd3,0x1b,0x52,0x04,0x01,0x00, - 0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x86,0xe3,0x82,0x99,0x00,0x06,0x00,0x10, - 0x04,0x06,0x00,0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x08,0x10, - 0x04,0x01,0x08,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0x9d, - 0xe3,0x82,0x99,0x00,0x06,0x00,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04, - 0x06,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b, - 0x01,0xff,0xe3,0x82,0xab,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3, - 0x82,0xad,0xe3,0x82,0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b, - 0x01,0xff,0xe3,0x82,0xaf,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3, - 0x82,0xb1,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82, - 0xb3,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb5,0xe3,0x82, - 0x99,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb7,0xe3, - 0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb9,0xe3,0x82,0x99,0x00, - 0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbb,0xe3,0x82,0x99,0x00,0x01, - 0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbd,0xe3,0x82,0x99,0x00,0x01,0x00,0xcf,0x86, - 0xd5,0xd5,0xd4,0x53,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82, - 0xbf,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0x81,0xe3,0x82, - 0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x84,0xe3, - 0x82,0x99,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x86,0xe3,0x82,0x99,0x00, - 0x92,0x13,0x91,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x88,0xe3,0x82,0x99, - 0x00,0x01,0x00,0x01,0x00,0xd3,0x4a,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3, - 0x83,0x8f,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x8f,0xe3,0x82,0x9a,0x00,0x10, - 0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x92,0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b, - 0x01,0xff,0xe3,0x83,0x92,0xe3,0x82,0x9a,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3, - 0x83,0x95,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x95,0xe3,0x82,0x9a,0x00,0xd2, - 
0x1e,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x98,0xe3,0x82,0x99,0x00, - 0x10,0x0b,0x01,0xff,0xe3,0x83,0x98,0xe3,0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10, - 0x0b,0x01,0xff,0xe3,0x83,0x9b,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x9b,0xe3, - 0x82,0x9a,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x22,0x52,0x04,0x01,0x00,0xd1, - 0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xa6,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x04, - 0x01,0x00,0x01,0xff,0xe3,0x83,0xaf,0xe3,0x82,0x99,0x00,0xd2,0x25,0xd1,0x16,0x10, - 0x0b,0x01,0xff,0xe3,0x83,0xb0,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0xb1,0xe3, - 0x82,0x99,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0xb2,0xe3,0x82,0x99,0x00,0x01,0x00, - 0x51,0x04,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0xbd,0xe3,0x82,0x99,0x00,0x06, - 0x00,0xd1,0x4c,0xd0,0x46,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x00, - 0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4, - 0x18,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a, - 0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00, - 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x06,0x01,0x00,0xd0,0x32,0xcf, - 0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01, - 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x54,0x04,0x04,0x00,0x53,0x04,0x04, - 0x00,0x92,0x0c,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0xcf, - 0x86,0xd5,0x08,0x14,0x04,0x08,0x00,0x0a,0x00,0x94,0x0c,0x93,0x08,0x12,0x04,0x0a, - 0x00,0x00,0x00,0x00,0x00,0x06,0x00,0xd2,0xa4,0xd1,0x5c,0xd0,0x22,0xcf,0x86,0x95, - 0x1c,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10, - 0x04,0x01,0x00,0x07,0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x01,0x00,0xcf,0x86,0xd5, - 0x20,0xd4,0x0c,0x93,0x08,0x12,0x04,0x01,0x00,0x0b,0x00,0x0b,0x00,0x93,0x10,0x92, - 0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x54, - 0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x07,0x00,0x10, - 0x04,0x08,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01, - 0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x06,0x00,0x06, - 0x00,0x06,0x00,0xcf,0x86,0xd5,0x10,0x94,0x0c,0x53,0x04,0x01,0x00,0x12,0x04,0x01, - 0x00,0x07,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01, - 0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x16,0x00,0xd1,0x30,0xd0,0x06,0xcf, - 0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x52, - 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x07,0x00,0x92,0x0c,0x51, - 0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x01,0x00,0x01,0x00,0xd0,0x06,0xcf,0x06,0x01, - 0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01, - 0x00,0x11,0x04,0x01,0x00,0x07,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52, - 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x07,0x00,0xcf,0x06,0x04, - 0x00,0xcf,0x06,0x04,0x00,0xd1,0x48,0xd0,0x40,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x04, - 0x00,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x2c,0xd2,0x06,0xcf,0x06,0x04,0x00,0xd1, - 0x06,0xcf,0x06,0x04,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04, - 0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xcf, - 0x06,0x07,0x00,0xcf,0x06,0x01,0x00,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xcf, - 0x06,0x01,0x00,0xe2,0x71,0x05,0xd1,0x8c,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00, - 0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xd4,0x06,0xcf,0x06,0x01,0x00,0xd3,0x06, - 
0xcf,0x06,0x01,0x00,0xd2,0x06,0xcf,0x06,0x01,0x00,0xd1,0x06,0xcf,0x06,0x01,0x00, - 0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x01,0x00, - 0x11,0x04,0x01,0x00,0x08,0x00,0x08,0x00,0x53,0x04,0x08,0x00,0x12,0x04,0x08,0x00, - 0x0a,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0b,0x00, - 0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x11,0x00,0x11,0x00,0x93,0x0c, - 0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x13,0x00,0x13,0x00,0x94,0x14,0x53,0x04, - 0x13,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x14,0x00, - 0x00,0x00,0xe0,0xdb,0x04,0xcf,0x86,0xe5,0xdf,0x01,0xd4,0x06,0xcf,0x06,0x04,0x00, - 0xd3,0x74,0xd2,0x6e,0xd1,0x06,0xcf,0x06,0x04,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x18, - 0x94,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00, - 0x00,0x00,0x00,0x00,0x04,0x00,0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x04,0x00, - 0x06,0x00,0x04,0x00,0x04,0x00,0x93,0x10,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04, - 0x06,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86,0x95,0x24,0x94,0x20,0x93,0x1c, - 0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00,0x04,0x00,0xd1,0x08,0x10,0x04, - 0x04,0x00,0x06,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0x0b,0x00, - 0xcf,0x06,0x0a,0x00,0xd2,0x84,0xd1,0x4c,0xd0,0x16,0xcf,0x86,0x55,0x04,0x0a,0x00, - 0x94,0x0c,0x53,0x04,0x0a,0x00,0x12,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0xcf,0x86, - 0x55,0x04,0x0a,0x00,0xd4,0x1c,0xd3,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x0a,0x00, - 0x0a,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0xe6, - 0xd3,0x08,0x12,0x04,0x0a,0x00,0x0d,0xe6,0x52,0x04,0x0d,0xe6,0x11,0x04,0x0a,0xe6, - 0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00, - 0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x11,0xe6,0x0d,0xe6,0x0b,0x00, - 0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x0c,0x92,0x08,0x11,0x04, - 0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x24, - 0x54,0x04,0x08,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04, - 0x08,0x00,0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00, - 0x0a,0x00,0x94,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00, - 0x0a,0x00,0x0a,0x00,0xcf,0x06,0x0a,0x00,0xd0,0x5e,0xcf,0x86,0xd5,0x28,0xd4,0x18, - 0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0xd1,0x08,0x10,0x04,0x0a,0x00,0x0c,0x00, - 0x10,0x04,0x0c,0x00,0x11,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x0d,0x00, - 0x10,0x00,0x10,0x00,0xd4,0x1c,0x53,0x04,0x0c,0x00,0xd2,0x0c,0x51,0x04,0x0c,0x00, - 0x10,0x04,0x0d,0x00,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x12,0x00,0x14,0x00, - 0xd3,0x0c,0x92,0x08,0x11,0x04,0x10,0x00,0x11,0x00,0x11,0x00,0x92,0x08,0x11,0x04, - 0x14,0x00,0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x1c,0x94,0x18,0x93,0x14,0xd2,0x08, - 0x11,0x04,0x00,0x00,0x15,0x00,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x54,0x04,0x00,0x00,0xd3,0x10,0x52,0x04,0x00,0x00,0x51,0x04, - 0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x92,0x0c,0x51,0x04,0x0d,0x00,0x10,0x04, - 0x0c,0x00,0x0a,0x00,0x0a,0x00,0xe4,0xf2,0x02,0xe3,0x65,0x01,0xd2,0x98,0xd1,0x48, - 0xd0,0x36,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x08,0x00,0x51,0x04, - 0x08,0x00,0x10,0x04,0x08,0x09,0x08,0x00,0x08,0x00,0x08,0x00,0xd4,0x0c,0x53,0x04, - 0x08,0x00,0x12,0x04,0x08,0x00,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08,0x11,0x04, - 0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0x54,0x04,0x09,0x00, - 
0x13,0x04,0x09,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x0a,0x00,0xcf,0x86,0xd5,0x2c, - 0xd4,0x1c,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x09,0x12,0x00, - 0x00,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x0a,0x00,0x53,0x04,0x0a,0x00, - 0x92,0x08,0x11,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x0b,0xe6,0xd3,0x0c, - 0x92,0x08,0x11,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x11,0x04, - 0x11,0x00,0x14,0x00,0xd1,0x60,0xd0,0x22,0xcf,0x86,0x55,0x04,0x0a,0x00,0x94,0x18, - 0x53,0x04,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0xdc, - 0x11,0x04,0x0a,0xdc,0x0a,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x0a,0x00, - 0xd3,0x10,0x92,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x0a,0x09,0x00,0x00, - 0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0x54,0x04, - 0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00, - 0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00, - 0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0x07,0x0b,0x00, - 0x0b,0x00,0xcf,0x86,0xd5,0x34,0xd4,0x20,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04, - 0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00, - 0x10,0x04,0x00,0x00,0x0b,0x00,0x53,0x04,0x0b,0x00,0xd2,0x08,0x11,0x04,0x0b,0x00, - 0x00,0x00,0x11,0x04,0x00,0x00,0x0b,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00, - 0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0xd2,0xd0, - 0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0a,0x00,0x54,0x04,0x0a,0x00,0x93,0x10, - 0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00, - 0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x11,0x04, - 0x0a,0x00,0x00,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x00,0x00, - 0x0a,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x12,0x04,0x0b,0x00,0x10,0x00, - 0xd0,0x3a,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0xd3,0x1c,0xd2,0x0c, - 0x91,0x08,0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0xe6,0xd1,0x08,0x10,0x04,0x0b,0xdc, - 0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xe6,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0xe6, - 0x0b,0x00,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0b,0xe6,0xcf,0x86,0xd5,0x2c,0xd4,0x18, - 0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6,0x10,0x04,0x0b,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00, - 0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x54,0x04,0x0d,0x00,0x93,0x10,0x52,0x04, - 0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x09,0x00,0x00,0x00,0x00,0xd1,0x8c, - 0xd0,0x72,0xcf,0x86,0xd5,0x4c,0xd4,0x30,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04, - 0x00,0x00,0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00, + 0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x08,0x00,0x01,0x00, + 0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xcf,0x86, + 0xd5,0x61,0xd4,0x45,0xd3,0x14,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00, + 0x00,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x08,0x10,0x04,0x01,0x00, + 0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xaf,0x86,0xe0,0xae,0xbe,0x00,0x01,0xff,0xe0, + 0xaf,0x87,0xe0,0xae,0xbe,0x00,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xaf,0x86,0xe0, + 0xaf,0x97,0x00,0x01,0x09,0x00,0x00,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0a, + 0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x00, + 0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x08, + 
0x00,0x01,0x00,0x01,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01, + 0x00,0x07,0x00,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00, + 0x00,0x00,0x00,0xe3,0x1c,0x04,0xe2,0x1a,0x02,0xd1,0xf3,0xd0,0x76,0xcf,0x86,0xd5, + 0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01, + 0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x91, + 0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10, + 0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01, + 0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd3, + 0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x01,0x00,0x01,0x00,0xd2, + 0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x01, + 0x00,0xcf,0x86,0xd5,0x53,0xd4,0x2f,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10, + 0x04,0x01,0x00,0x00,0x00,0x01,0x00,0xd2,0x13,0x91,0x0f,0x10,0x0b,0x01,0xff,0xe0, + 0xb1,0x86,0xe0,0xb1,0x96,0x00,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00, + 0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00, + 0x01,0x54,0x10,0x04,0x01,0x5b,0x00,0x00,0x92,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04, + 0x11,0x00,0x00,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04,0x01,0x00, + 0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x10,0x52,0x04,0x00,0x00, + 0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0a,0x00,0xd0,0x76,0xcf,0x86, + 0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x10,0x00, + 0x01,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00, + 0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x14,0x53,0x04, + 0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00, + 0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00, + 0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x07,0x07,0x07,0x00, + 0x01,0x00,0xcf,0x86,0xd5,0x82,0xd4,0x5e,0xd3,0x2a,0xd2,0x13,0x91,0x0f,0x10,0x0b, + 0x01,0xff,0xe0,0xb2,0xbf,0xe0,0xb3,0x95,0x00,0x01,0x00,0x01,0x00,0xd1,0x08,0x10, + 0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3, + 0x95,0x00,0xd2,0x28,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3,0x96, + 0x00,0x00,0x00,0x10,0x0b,0x01,0xff,0xe0,0xb3,0x86,0xe0,0xb3,0x82,0x00,0x01,0xff, + 0xe0,0xb3,0x86,0xe0,0xb3,0x82,0xe0,0xb3,0x95,0x00,0x91,0x08,0x10,0x04,0x01,0x00, + 0x01,0x09,0x00,0x00,0xd3,0x14,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00, + 0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00, + 0x10,0x04,0x01,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04,0x01,0x00, + 0x09,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x93,0x14,0x92,0x10,0xd1,0x08, + 0x10,0x04,0x00,0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0xe1,0x06,0x01,0xd0,0x6e,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x91, + 0x08,0x10,0x04,0x13,0x00,0x10,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x01, + 0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01, + 0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01, + 0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x01, + 0x00,0x0c,0x00,0x01,0x00,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01, + 
0x00,0x10,0x04,0x0c,0x00,0x13,0x09,0x91,0x08,0x10,0x04,0x13,0x09,0x0a,0x00,0x01, + 0x00,0xcf,0x86,0xd5,0x65,0xd4,0x45,0xd3,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10, + 0x04,0x0a,0x00,0x00,0x00,0x01,0x00,0xd2,0x1e,0xd1,0x08,0x10,0x04,0x01,0x00,0x00, + 0x00,0x10,0x0b,0x01,0xff,0xe0,0xb5,0x86,0xe0,0xb4,0xbe,0x00,0x01,0xff,0xe0,0xb5, + 0x87,0xe0,0xb4,0xbe,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe0,0xb5,0x86,0xe0,0xb5, + 0x97,0x00,0x01,0x09,0x10,0x04,0x0c,0x00,0x12,0x00,0xd3,0x10,0x52,0x04,0x00,0x00, + 0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x01,0x00,0x52,0x04,0x12,0x00,0x51,0x04, + 0x12,0x00,0x10,0x04,0x12,0x00,0x11,0x00,0xd4,0x14,0x93,0x10,0xd2,0x08,0x11,0x04, + 0x01,0x00,0x0a,0x00,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x0c,0x52,0x04, + 0x0a,0x00,0x11,0x04,0x0a,0x00,0x12,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x12,0x00, + 0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x5a,0xcf,0x86,0xd5,0x34,0xd4,0x18,0x93,0x14, + 0xd2,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x04,0x00, + 0x04,0x00,0x04,0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04, + 0x04,0x00,0x00,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x54,0x04, + 0x04,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x04,0x00,0x10,0x04,0x00,0x00,0x04,0x00, + 0x04,0x00,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x04,0x00,0x00,0x00, + 0xcf,0x86,0xd5,0x77,0xd4,0x28,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00, + 0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x04,0x09, + 0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x04,0x00,0xd3,0x14,0x52,0x04, + 0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x10,0x04,0x04,0x00,0x00,0x00, + 0xd2,0x13,0x51,0x04,0x04,0x00,0x10,0x0b,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8a, + 0x00,0x04,0x00,0xd1,0x19,0x10,0x0b,0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8f,0x00, + 0x04,0xff,0xe0,0xb7,0x99,0xe0,0xb7,0x8f,0xe0,0xb7,0x8a,0x00,0x10,0x0b,0x04,0xff, + 0xe0,0xb7,0x99,0xe0,0xb7,0x9f,0x00,0x04,0x00,0xd4,0x10,0x93,0x0c,0x52,0x04,0x00, + 0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x93,0x14,0xd2,0x08,0x11,0x04,0x00, + 0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xe2, + 0x31,0x01,0xd1,0x58,0xd0,0x3a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c, + 0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00, + 0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x67,0x10,0x04, + 0x01,0x09,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xcf,0x86, + 0x95,0x18,0xd4,0x0c,0x53,0x04,0x01,0x00,0x12,0x04,0x01,0x6b,0x01,0x00,0x53,0x04, + 0x01,0x00,0x12,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd0,0x9e,0xcf,0x86,0xd5,0x54, + 0xd4,0x3c,0xd3,0x20,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04, + 0x01,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x15,0x00, + 0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x15,0x00,0x10,0x04,0x01,0x00, + 0x00,0x00,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,0x15,0x00,0xd3,0x08,0x12,0x04, + 0x15,0x00,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00,0x01,0x00, + 0x01,0x00,0xd4,0x30,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00, + 0x01,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x10,0x04,0x00,0x00,0x01,0x00, + 0xd2,0x08,0x11,0x04,0x15,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x15,0x00,0x01,0x00, + 0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x76,0x10,0x04,0x15,0x09, + 0x01,0x00,0x11,0x04,0x01,0x00,0x00,0x00,0xcf,0x86,0x95,0x34,0xd4,0x20,0xd3,0x14, + 
0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x01,0x00, + 0x00,0x00,0x52,0x04,0x01,0x7a,0x11,0x04,0x01,0x00,0x00,0x00,0x53,0x04,0x01,0x00, + 0xd2,0x08,0x11,0x04,0x01,0x00,0x00,0x00,0x11,0x04,0x01,0x00,0x0d,0x00,0x00,0x00, + 0xe1,0x2b,0x01,0xd0,0x3e,0xcf,0x86,0xd5,0x14,0x54,0x04,0x02,0x00,0x53,0x04,0x02, + 0x00,0x92,0x08,0x11,0x04,0x02,0xdc,0x02,0x00,0x02,0x00,0x54,0x04,0x02,0x00,0xd3, + 0x14,0x52,0x04,0x02,0x00,0xd1,0x08,0x10,0x04,0x02,0x00,0x02,0xdc,0x10,0x04,0x02, + 0x00,0x02,0xdc,0x92,0x0c,0x91,0x08,0x10,0x04,0x02,0x00,0x02,0xd8,0x02,0x00,0x02, + 0x00,0xcf,0x86,0xd5,0x73,0xd4,0x36,0xd3,0x17,0x92,0x13,0x51,0x04,0x02,0x00,0x10, + 0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x82,0xe0,0xbe,0xb7,0x00,0x02,0x00,0xd2,0x0c, + 0x91,0x08,0x10,0x04,0x00,0x00,0x02,0x00,0x02,0x00,0x91,0x0f,0x10,0x04,0x02,0x00, + 0x02,0xff,0xe0,0xbd,0x8c,0xe0,0xbe,0xb7,0x00,0x02,0x00,0xd3,0x26,0xd2,0x13,0x51, + 0x04,0x02,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbd,0x91,0xe0,0xbe,0xb7,0x00,0x02,0x00, + 0x51,0x04,0x02,0x00,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x96,0xe0,0xbe,0xb7, + 0x00,0x52,0x04,0x02,0x00,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbd,0x9b,0xe0,0xbe, + 0xb7,0x00,0x02,0x00,0x02,0x00,0xd4,0x27,0x53,0x04,0x02,0x00,0xd2,0x17,0xd1,0x0f, + 0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbd,0x80,0xe0,0xbe,0xb5,0x00,0x10,0x04,0x04, + 0x00,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0xd3,0x35,0xd2, + 0x17,0xd1,0x08,0x10,0x04,0x00,0x00,0x02,0x81,0x10,0x04,0x02,0x82,0x02,0xff,0xe0, + 0xbd,0xb1,0xe0,0xbd,0xb2,0x00,0xd1,0x0f,0x10,0x04,0x02,0x84,0x02,0xff,0xe0,0xbd, + 0xb1,0xe0,0xbd,0xb4,0x00,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xb2,0xe0,0xbe,0x80,0x00, + 0x02,0x00,0xd2,0x13,0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xb3,0xe0,0xbe,0x80, + 0x00,0x02,0x00,0x02,0x82,0x11,0x04,0x02,0x82,0x02,0x00,0xd0,0xd3,0xcf,0x86,0xd5, + 0x65,0xd4,0x27,0xd3,0x1f,0xd2,0x13,0x91,0x0f,0x10,0x04,0x02,0x82,0x02,0xff,0xe0, + 0xbd,0xb1,0xe0,0xbe,0x80,0x00,0x02,0xe6,0x91,0x08,0x10,0x04,0x02,0x09,0x02,0x00, + 0x02,0xe6,0x12,0x04,0x02,0x00,0x0c,0x00,0xd3,0x1f,0xd2,0x13,0x51,0x04,0x02,0x00, + 0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0x92,0xe0,0xbe,0xb7,0x00,0x51,0x04,0x02, + 0x00,0x10,0x04,0x04,0x00,0x02,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x02, + 0x00,0x02,0x00,0x91,0x0f,0x10,0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0x9c,0xe0,0xbe, + 0xb7,0x00,0x02,0x00,0xd4,0x3d,0xd3,0x26,0xd2,0x13,0x51,0x04,0x02,0x00,0x10,0x0b, + 0x02,0xff,0xe0,0xbe,0xa1,0xe0,0xbe,0xb7,0x00,0x02,0x00,0x51,0x04,0x02,0x00,0x10, + 0x04,0x02,0x00,0x02,0xff,0xe0,0xbe,0xa6,0xe0,0xbe,0xb7,0x00,0x52,0x04,0x02,0x00, + 0x91,0x0f,0x10,0x0b,0x02,0xff,0xe0,0xbe,0xab,0xe0,0xbe,0xb7,0x00,0x02,0x00,0x04, + 0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x02,0x00,0x02,0x00,0x02, + 0x00,0xd2,0x13,0x91,0x0f,0x10,0x04,0x04,0x00,0x02,0xff,0xe0,0xbe,0x90,0xe0,0xbe, + 0xb5,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xcf,0x86, + 0x95,0x4c,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04, + 0x04,0xdc,0x04,0x00,0x52,0x04,0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x00,0x00, + 0x10,0x04,0x0a,0x00,0x04,0x00,0xd3,0x14,0xd2,0x08,0x11,0x04,0x08,0x00,0x0a,0x00, + 0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0x92,0x10,0xd1,0x08,0x10,0x04, + 0x0b,0x00,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86, + 0xe5,0xf7,0x04,0xe4,0x79,0x03,0xe3,0x7b,0x01,0xe2,0x04,0x01,0xd1,0x7f,0xd0,0x65, + 0xcf,0x86,0x55,0x04,0x04,0x00,0xd4,0x33,0xd3,0x1f,0xd2,0x0c,0x51,0x04,0x04,0x00, + 
0x10,0x04,0x0a,0x00,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x0b,0x04,0xff,0xe1,0x80, + 0xa5,0xe1,0x80,0xae,0x00,0x04,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x0a,0x00,0x04, + 0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x04,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x04, + 0x00,0x10,0x04,0x04,0x00,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x04,0x00,0x04, + 0x07,0x92,0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0x09,0x10,0x04,0x0a,0x09,0x0a, + 0x00,0x0a,0x00,0xcf,0x86,0x95,0x14,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92, + 0x08,0x11,0x04,0x04,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xd0,0x2e,0xcf,0x86,0x95, + 0x28,0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a, + 0x00,0x0a,0xdc,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00,0x0b, + 0x00,0x11,0x04,0x0b,0x00,0x0a,0x00,0x01,0x00,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3, + 0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x52, + 0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0d,0x00,0x00,0x00,0x01,0x00,0x54, + 0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01, + 0x00,0x06,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x06,0x00,0x08,0x00,0x10,0x04,0x08, + 0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0d,0x00,0x0d,0x00,0xd1,0x3e,0xd0, + 0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x1d,0x54,0x04,0x01,0x00,0x53,0x04,0x01, + 0x00,0xd2,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b, + 0x00,0x01,0xff,0x00,0x94,0x15,0x93,0x11,0x92,0x0d,0x91,0x09,0x10,0x05,0x01,0xff, + 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55, + 0x04,0x01,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01, + 0x00,0x0b,0x00,0x0b,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54, + 0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0b,0x00,0x0b, + 0x00,0xe2,0x21,0x01,0xd1,0x6c,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10, + 0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00, + 0x04,0x00,0x04,0x00,0xcf,0x86,0x95,0x48,0xd4,0x24,0xd3,0x10,0x52,0x04,0x04,0x00, + 0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04, + 0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0xd3,0x10,0x52,0x04, + 0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08, + 0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00, + 0xd0,0x62,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x10,0x52,0x04,0x04,0x00,0x51,0x04, + 0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00, + 0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0xd4,0x14,0x53,0x04, + 0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00, + 0xd3,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04, + 0x04,0x00,0x00,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00, + 0x00,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24,0xd3,0x14,0xd2,0x0c,0x91,0x08,0x10,0x04, + 0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x52,0x04,0x04,0x00, + 0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x93,0x10,0x52,0x04,0x04,0x00, + 0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x94,0x14,0x53,0x04, + 0x04,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00, + 0x04,0x00,0xd1,0x9c,0xd0,0x3e,0xcf,0x86,0x95,0x38,0xd4,0x14,0x53,0x04,0x04,0x00, + 0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0xd3,0x14, + 
0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x04,0x00,0x11,0x04,0x04,0x00, + 0x00,0x00,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00, + 0x04,0x00,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x93,0x10,0x52,0x04,0x04,0x00,0x51,0x04, + 0x04,0x00,0x10,0x04,0x04,0x00,0x08,0x00,0x04,0x00,0x53,0x04,0x04,0x00,0xd2,0x0c, + 0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x00,0x00, + 0x0c,0xe6,0x10,0x04,0x0c,0xe6,0x08,0xe6,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x08,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x53,0x04,0x04,0x00, + 0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a, + 0xcf,0x86,0x95,0x14,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04, + 0x08,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04, + 0x04,0x00,0xd3,0x10,0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x11,0x00, + 0x00,0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x00,0x00,0xd3,0x30,0xd2,0x2a, + 0xd1,0x24,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x0b,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0x04,0x00, + 0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xd2,0x6c,0xd1,0x24, + 0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00, + 0x93,0x10,0x52,0x04,0x04,0x00,0x51,0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x0b,0x00, + 0x0b,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00, + 0x52,0x04,0x04,0x00,0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x04,0x00, + 0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x04,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04, + 0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x46,0xcf,0x86,0xd5,0x28, + 0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x00, + 0x00,0x00,0x06,0x00,0x93,0x10,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04,0x06,0x09, + 0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x06,0x00,0x93,0x14,0x52,0x04,0x06,0x00, + 0xd1,0x08,0x10,0x04,0x06,0x09,0x06,0x00,0x10,0x04,0x06,0x00,0x00,0x00,0x00,0x00, + 0xcf,0x86,0xd5,0x10,0x54,0x04,0x06,0x00,0x93,0x08,0x12,0x04,0x06,0x00,0x00,0x00, + 0x00,0x00,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x08,0x10,0x04, + 0x06,0x00,0x00,0x00,0x06,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00, + 0x00,0x00,0x06,0x00,0x00,0x00,0x00,0x00,0xd0,0x1b,0xcf,0x86,0x55,0x04,0x04,0x00, + 0x54,0x04,0x04,0x00,0x93,0x0d,0x52,0x04,0x04,0x00,0x11,0x05,0x04,0xff,0x00,0x04, + 0x00,0x04,0x00,0xcf,0x86,0xd5,0x24,0x54,0x04,0x04,0x00,0xd3,0x10,0x92,0x0c,0x51, + 0x04,0x04,0x00,0x10,0x04,0x04,0x09,0x04,0x00,0x04,0x00,0x52,0x04,0x04,0x00,0x91, + 0x08,0x10,0x04,0x04,0x00,0x07,0xe6,0x00,0x00,0xd4,0x10,0x53,0x04,0x04,0x00,0x92, + 0x08,0x11,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x08,0x11, + 0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xe4,0xb7,0x03,0xe3,0x58,0x01,0xd2,0x8f,0xd1, + 0x53,0xd0,0x35,0xcf,0x86,0x95,0x2f,0xd4,0x1f,0x53,0x04,0x04,0x00,0xd2,0x0d,0x51, + 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x04,0xff,0x00,0x51,0x05,0x04,0xff,0x00,0x10, + 0x05,0x04,0xff,0x00,0x00,0x00,0x53,0x04,0x04,0x00,0x92,0x08,0x11,0x04,0x04,0x00, + 0x00,0x00,0x00,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04,0x00,0x54,0x04,0x04,0x00, + 0x53,0x04,0x04,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x04,0x00,0x94,0x18,0x53,0x04,0x04,0x00, + 
0x92,0x10,0xd1,0x08,0x10,0x04,0x04,0x00,0x04,0xe4,0x10,0x04,0x0a,0x00,0x00,0x00, + 0x00,0x00,0x0b,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x0c, + 0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd1,0x80,0xd0,0x42, + 0xcf,0x86,0xd5,0x1c,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00, + 0xd1,0x08,0x10,0x04,0x07,0x00,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0xd4,0x0c, + 0x53,0x04,0x07,0x00,0x12,0x04,0x07,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x92,0x10, + 0xd1,0x08,0x10,0x04,0x07,0x00,0x07,0xde,0x10,0x04,0x07,0xe6,0x07,0xdc,0x00,0x00, + 0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00, + 0x00,0x00,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0xd4,0x10,0x53,0x04,0x07,0x00, + 0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x07,0x00, + 0x91,0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x1a,0xcf,0x86, + 0x55,0x04,0x08,0x00,0x94,0x10,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00, + 0x0b,0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x95,0x28,0xd4,0x10,0x53,0x04,0x08,0x00, + 0x92,0x08,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0xd2,0x0c, + 0x51,0x04,0x08,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x08,0x00, + 0x07,0x00,0xd2,0xe4,0xd1,0x80,0xd0,0x2e,0xcf,0x86,0x95,0x28,0x54,0x04,0x08,0x00, + 0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x08,0xe6, + 0xd2,0x0c,0x91,0x08,0x10,0x04,0x08,0xdc,0x08,0x00,0x08,0x00,0x11,0x04,0x00,0x00, + 0x08,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00, + 0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xd4,0x14, + 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00, + 0x0b,0x00,0xd3,0x10,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xe6, + 0x0b,0xe6,0x52,0x04,0x0b,0xe6,0xd1,0x08,0x10,0x04,0x0b,0xe6,0x00,0x00,0x10,0x04, + 0x00,0x00,0x0b,0xdc,0xd0,0x5e,0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0b,0x00, + 0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08, + 0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd4,0x10,0x53,0x04,0x0b,0x00,0x52,0x04, + 0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x10,0xe6,0x91,0x08, + 0x10,0x04,0x10,0xe6,0x10,0xdc,0x10,0xdc,0xd2,0x0c,0x51,0x04,0x10,0xdc,0x10,0x04, + 0x10,0xdc,0x10,0xe6,0xd1,0x08,0x10,0x04,0x10,0xe6,0x10,0xdc,0x10,0x04,0x10,0x00, + 0x00,0x00,0xcf,0x06,0x00,0x00,0xe1,0x1e,0x01,0xd0,0xaa,0xcf,0x86,0xd5,0x6e,0xd4, + 0x53,0xd3,0x17,0x52,0x04,0x09,0x00,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1, + 0xac,0x85,0xe1,0xac,0xb5,0x00,0x09,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x09,0xff, + 0xe1,0xac,0x87,0xe1,0xac,0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x89, + 0xe1,0xac,0xb5,0x00,0x09,0x00,0xd1,0x0f,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8b,0xe1, + 0xac,0xb5,0x00,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac,0x8d,0xe1,0xac,0xb5,0x00, + 0x09,0x00,0x93,0x17,0x92,0x13,0x51,0x04,0x09,0x00,0x10,0x0b,0x09,0xff,0xe1,0xac, + 0x91,0xe1,0xac,0xb5,0x00,0x09,0x00,0x09,0x00,0x09,0x00,0x54,0x04,0x09,0x00,0xd3, + 0x10,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x07,0x09,0x00,0x09,0x00,0xd2, + 0x13,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xba,0xe1,0xac, + 0xb5,0x00,0x91,0x0f,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xac,0xbc,0xe1,0xac,0xb5, + 0x00,0x09,0x00,0xcf,0x86,0xd5,0x3d,0x94,0x39,0xd3,0x31,0xd2,0x25,0xd1,0x16,0x10, + 0x0b,0x09,0xff,0xe1,0xac,0xbe,0xe1,0xac,0xb5,0x00,0x09,0xff,0xe1,0xac,0xbf,0xe1, + 
0xac,0xb5,0x00,0x10,0x04,0x09,0x00,0x09,0xff,0xe1,0xad,0x82,0xe1,0xac,0xb5,0x00, + 0x91,0x08,0x10,0x04,0x09,0x09,0x09,0x00,0x09,0x00,0x12,0x04,0x09,0x00,0x00,0x00, + 0x09,0x00,0xd4,0x1c,0x53,0x04,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09,0x00,0x10,0x04, + 0x09,0x00,0x09,0xe6,0x91,0x08,0x10,0x04,0x09,0xdc,0x09,0xe6,0x09,0xe6,0xd3,0x08, + 0x12,0x04,0x09,0xe6,0x09,0x00,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00, + 0x00,0x00,0x00,0x00,0xd0,0x2e,0xcf,0x86,0x55,0x04,0x0a,0x00,0xd4,0x18,0x53,0x04, + 0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x09,0x0d,0x09,0x11,0x04, + 0x0d,0x00,0x0a,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x0d,0x00, + 0x0d,0x00,0xcf,0x86,0x55,0x04,0x0c,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x0c,0x00, + 0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x07,0x0c,0x00,0x0c,0x00,0xd3,0x0c,0x92,0x08, + 0x11,0x04,0x0c,0x00,0x0c,0x09,0x00,0x00,0x12,0x04,0x00,0x00,0x0c,0x00,0xe3,0xb2, + 0x01,0xe2,0x09,0x01,0xd1,0x4c,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x0a,0x00,0x54,0x04, + 0x0a,0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04,0x0a,0x00, + 0x0a,0x07,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00, + 0xcf,0x86,0x95,0x1c,0x94,0x18,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00, + 0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00, + 0xd0,0x3a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x14,0x00,0x54,0x04,0x14,0x00, + 0x53,0x04,0x14,0x00,0xd2,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00, + 0x91,0x08,0x10,0x04,0x00,0x00,0x14,0x00,0x14,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x08, + 0x13,0x04,0x0d,0x00,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x0b,0xe6,0x10,0x04, + 0x0b,0xe6,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x01,0x0b,0xdc,0x0b,0xdc,0x92,0x08, + 0x11,0x04,0x0b,0xdc,0x0b,0xe6,0x0b,0xdc,0xd4,0x28,0xd3,0x10,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x01,0x0b,0x01,0xd2,0x0c,0x91,0x08,0x10,0x04, + 0x0b,0x01,0x0b,0x00,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0xdc,0x0b,0x00, + 0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0d,0x00,0xd1,0x08, + 0x10,0x04,0x0d,0xe6,0x0d,0x00,0x10,0x04,0x0d,0x00,0x13,0x00,0x92,0x0c,0x51,0x04, + 0x10,0xe6,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06, + 0x07,0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x94,0x0c,0x53,0x04,0x07,0x00,0x12,0x04, + 0x07,0x00,0x08,0x00,0x08,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00,0xcf,0x86,0xd5,0x40, + 0xd4,0x2c,0xd3,0x10,0x92,0x0c,0x51,0x04,0x08,0xe6,0x10,0x04,0x08,0xdc,0x08,0xe6, + 0x09,0xe6,0xd2,0x0c,0x51,0x04,0x09,0xe6,0x10,0x04,0x09,0xdc,0x0a,0xe6,0xd1,0x08, + 0x10,0x04,0x0a,0xe6,0x0a,0xea,0x10,0x04,0x0a,0xd6,0x0a,0xdc,0x93,0x10,0x92,0x0c, + 0x91,0x08,0x10,0x04,0x0a,0xca,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0x0a,0xe6,0xd4,0x14, + 0x93,0x10,0x52,0x04,0x0a,0xe6,0x51,0x04,0x0a,0xe6,0x10,0x04,0x0a,0xe6,0x10,0xe6, + 0x10,0xe6,0xd3,0x10,0x52,0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x13,0xe8, + 0x13,0xe4,0xd2,0x10,0xd1,0x08,0x10,0x04,0x13,0xe4,0x13,0xdc,0x10,0x04,0x00,0x00, + 0x12,0xe6,0xd1,0x08,0x10,0x04,0x0c,0xe9,0x0b,0xdc,0x10,0x04,0x09,0xe6,0x09,0xdc, + 0xe2,0x80,0x08,0xe1,0x48,0x04,0xe0,0x1c,0x02,0xcf,0x86,0xe5,0x11,0x01,0xd4,0x84, + 0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa5,0x00,0x01,0xff, + 0x61,0xcc,0xa5,0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0x87,0x00,0x01,0xff,0x62,0xcc, + 0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x42,0xcc,0xa3,0x00,0x01,0xff,0x62,0xcc, + 
0xa3,0x00,0x10,0x08,0x01,0xff,0x42,0xcc,0xb1,0x00,0x01,0xff,0x62,0xcc,0xb1,0x00, + 0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x43,0xcc,0xa7,0xcc,0x81,0x00,0x01,0xff, + 0x63,0xcc,0xa7,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0x87,0x00,0x01,0xff, + 0x64,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa3,0x00,0x01,0xff, + 0x64,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xb1,0x00,0x01,0xff,0x64,0xcc, + 0xb1,0x00,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x44,0xcc,0xa7,0x00, + 0x01,0xff,0x64,0xcc,0xa7,0x00,0x10,0x08,0x01,0xff,0x44,0xcc,0xad,0x00,0x01,0xff, + 0x64,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc,0x80,0x00, + 0x01,0xff,0x65,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x84,0xcc, + 0x81,0x00,0x01,0xff,0x65,0xcc,0x84,0xcc,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x01,0xff,0x45,0xcc,0xad,0x00,0x01,0xff,0x65,0xcc,0xad,0x00,0x10,0x08,0x01,0xff, + 0x45,0xcc,0xb0,0x00,0x01,0xff,0x65,0xcc,0xb0,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff, + 0x45,0xcc,0xa7,0xcc,0x86,0x00,0x01,0xff,0x65,0xcc,0xa7,0xcc,0x86,0x00,0x10,0x08, + 0x01,0xff,0x46,0xcc,0x87,0x00,0x01,0xff,0x66,0xcc,0x87,0x00,0xd4,0x84,0xd3,0x40, + 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x47,0xcc,0x84,0x00,0x01,0xff,0x67,0xcc, + 0x84,0x00,0x10,0x08,0x01,0xff,0x48,0xcc,0x87,0x00,0x01,0xff,0x68,0xcc,0x87,0x00, + 0xd1,0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa3,0x00,0x01,0xff,0x68,0xcc,0xa3,0x00, + 0x10,0x08,0x01,0xff,0x48,0xcc,0x88,0x00,0x01,0xff,0x68,0xcc,0x88,0x00,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x01,0xff,0x48,0xcc,0xa7,0x00,0x01,0xff,0x68,0xcc,0xa7,0x00, + 0x10,0x08,0x01,0xff,0x48,0xcc,0xae,0x00,0x01,0xff,0x68,0xcc,0xae,0x00,0xd1,0x10, + 0x10,0x08,0x01,0xff,0x49,0xcc,0xb0,0x00,0x01,0xff,0x69,0xcc,0xb0,0x00,0x10,0x0a, + 0x01,0xff,0x49,0xcc,0x88,0xcc,0x81,0x00,0x01,0xff,0x69,0xcc,0x88,0xcc,0x81,0x00, + 0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0x81,0x00,0x01,0xff, + 0x6b,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x4b,0xcc,0xa3,0x00,0x01,0xff,0x6b,0xcc, + 0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4b,0xcc,0xb1,0x00,0x01,0xff,0x6b,0xcc, + 0xb1,0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xa3,0x00,0x01,0xff,0x6c,0xcc,0xa3,0x00, + 0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4c,0xcc,0xa3,0xcc,0x84,0x00,0x01,0xff, + 0x6c,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x4c,0xcc,0xb1,0x00,0x01,0xff, + 0x6c,0xcc,0xb1,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4c,0xcc,0xad,0x00,0x01,0xff, + 0x6c,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x4d,0xcc,0x81,0x00,0x01,0xff,0x6d,0xcc, + 0x81,0x00,0xcf,0x86,0xe5,0x15,0x01,0xd4,0x88,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x4d,0xcc,0x87,0x00,0x01,0xff,0x6d,0xcc,0x87,0x00,0x10,0x08,0x01, + 0xff,0x4d,0xcc,0xa3,0x00,0x01,0xff,0x6d,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01, + 0xff,0x4e,0xcc,0x87,0x00,0x01,0xff,0x6e,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x4e, + 0xcc,0xa3,0x00,0x01,0xff,0x6e,0xcc,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, + 0xff,0x4e,0xcc,0xb1,0x00,0x01,0xff,0x6e,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x4e, + 0xcc,0xad,0x00,0x01,0xff,0x6e,0xcc,0xad,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f, + 0xcc,0x83,0xcc,0x81,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01, + 0xff,0x4f,0xcc,0x83,0xcc,0x88,0x00,0x01,0xff,0x6f,0xcc,0x83,0xcc,0x88,0x00,0xd3, + 0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x80,0x00,0x01, + 0xff,0x6f,0xcc,0x84,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x84,0xcc,0x81, + 0x00,0x01,0xff,0x6f,0xcc,0x84,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x50, + 
0xcc,0x81,0x00,0x01,0xff,0x70,0xcc,0x81,0x00,0x10,0x08,0x01,0xff,0x50,0xcc,0x87, + 0x00,0x01,0xff,0x70,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x52, + 0xcc,0x87,0x00,0x01,0xff,0x72,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x52,0xcc,0xa3, + 0x00,0x01,0xff,0x72,0xcc,0xa3,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x52,0xcc,0xa3, + 0xcc,0x84,0x00,0x01,0xff,0x72,0xcc,0xa3,0xcc,0x84,0x00,0x10,0x08,0x01,0xff,0x52, + 0xcc,0xb1,0x00,0x01,0xff,0x72,0xcc,0xb1,0x00,0xd4,0x8c,0xd3,0x48,0xd2,0x20,0xd1, + 0x10,0x10,0x08,0x01,0xff,0x53,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x87,0x00,0x10, + 0x08,0x01,0xff,0x53,0xcc,0xa3,0x00,0x01,0xff,0x73,0xcc,0xa3,0x00,0xd1,0x14,0x10, + 0x0a,0x01,0xff,0x53,0xcc,0x81,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x81,0xcc,0x87, + 0x00,0x10,0x0a,0x01,0xff,0x53,0xcc,0x8c,0xcc,0x87,0x00,0x01,0xff,0x73,0xcc,0x8c, + 0xcc,0x87,0x00,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff,0x53,0xcc,0xa3,0xcc,0x87, + 0x00,0x01,0xff,0x73,0xcc,0xa3,0xcc,0x87,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0x87, + 0x00,0x01,0xff,0x74,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x54,0xcc,0xa3, + 0x00,0x01,0xff,0x74,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x54,0xcc,0xb1,0x00,0x01, + 0xff,0x74,0xcc,0xb1,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x54, + 0xcc,0xad,0x00,0x01,0xff,0x74,0xcc,0xad,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xa4, + 0x00,0x01,0xff,0x75,0xcc,0xa4,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x55,0xcc,0xb0, + 0x00,0x01,0xff,0x75,0xcc,0xb0,0x00,0x10,0x08,0x01,0xff,0x55,0xcc,0xad,0x00,0x01, + 0xff,0x75,0xcc,0xad,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x83, + 0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x83,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x55, + 0xcc,0x84,0xcc,0x88,0x00,0x01,0xff,0x75,0xcc,0x84,0xcc,0x88,0x00,0xd1,0x10,0x10, + 0x08,0x01,0xff,0x56,0xcc,0x83,0x00,0x01,0xff,0x76,0xcc,0x83,0x00,0x10,0x08,0x01, + 0xff,0x56,0xcc,0xa3,0x00,0x01,0xff,0x76,0xcc,0xa3,0x00,0xe0,0x10,0x02,0xcf,0x86, + 0xd5,0xe1,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc, + 0x80,0x00,0x01,0xff,0x77,0xcc,0x80,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x81,0x00, + 0x01,0xff,0x77,0xcc,0x81,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0x88,0x00, + 0x01,0xff,0x77,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x57,0xcc,0x87,0x00,0x01,0xff, + 0x77,0xcc,0x87,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x57,0xcc,0xa3,0x00, + 0x01,0xff,0x77,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x58,0xcc,0x87,0x00,0x01,0xff, + 0x78,0xcc,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x58,0xcc,0x88,0x00,0x01,0xff, + 0x78,0xcc,0x88,0x00,0x10,0x08,0x01,0xff,0x59,0xcc,0x87,0x00,0x01,0xff,0x79,0xcc, + 0x87,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0x82,0x00, + 0x01,0xff,0x7a,0xcc,0x82,0x00,0x10,0x08,0x01,0xff,0x5a,0xcc,0xa3,0x00,0x01,0xff, + 0x7a,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x5a,0xcc,0xb1,0x00,0x01,0xff, + 0x7a,0xcc,0xb1,0x00,0x10,0x08,0x01,0xff,0x68,0xcc,0xb1,0x00,0x01,0xff,0x74,0xcc, + 0x88,0x00,0x92,0x1d,0xd1,0x10,0x10,0x08,0x01,0xff,0x77,0xcc,0x8a,0x00,0x01,0xff, + 0x79,0xcc,0x8a,0x00,0x10,0x04,0x01,0x00,0x02,0xff,0xc5,0xbf,0xcc,0x87,0x00,0x0a, + 0x00,0xd4,0x98,0xd3,0x48,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x41,0xcc,0xa3, + 0x00,0x01,0xff,0x61,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x41,0xcc,0x89,0x00,0x01, + 0xff,0x61,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x81, + 0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82, + 0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x80,0x00,0xd2,0x28,0xd1,0x14,0x10, + 
0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x82,0xcc,0x89, + 0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x61,0xcc,0x82, + 0xcc,0x83,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x82,0x00,0x01, + 0xff,0x61,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x81, + 0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x81,0x00,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10, + 0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x80,0x00,0x01,0xff,0x61,0xcc,0x86,0xcc,0x80, + 0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x89,0x00,0x01,0xff,0x61,0xcc,0x86, + 0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x41,0xcc,0x86,0xcc,0x83,0x00,0x01, + 0xff,0x61,0xcc,0x86,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x41,0xcc,0xa3,0xcc,0x86, + 0x00,0x01,0xff,0x61,0xcc,0xa3,0xcc,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, + 0xff,0x45,0xcc,0xa3,0x00,0x01,0xff,0x65,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x45, + 0xcc,0x89,0x00,0x01,0xff,0x65,0xcc,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x45, + 0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82, + 0xcc,0x81,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x81,0x00,0xcf,0x86,0xe5,0x31,0x01, + 0xd4,0x90,0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x45,0xcc,0x82,0xcc, + 0x80,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0x45,0xcc, + 0x82,0xcc,0x89,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x89,0x00,0xd1,0x14,0x10,0x0a, + 0x01,0xff,0x45,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x65,0xcc,0x82,0xcc,0x83,0x00, + 0x10,0x0a,0x01,0xff,0x45,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff,0x65,0xcc,0xa3,0xcc, + 0x82,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0x49,0xcc,0x89,0x00,0x01,0xff, + 0x69,0xcc,0x89,0x00,0x10,0x08,0x01,0xff,0x49,0xcc,0xa3,0x00,0x01,0xff,0x69,0xcc, + 0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0x4f,0xcc,0xa3,0x00,0x01,0xff,0x6f,0xcc, + 0xa3,0x00,0x10,0x08,0x01,0xff,0x4f,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x89,0x00, + 0xd3,0x50,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc,0x81,0x00, + 0x01,0xff,0x6f,0xcc,0x82,0xcc,0x81,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x82,0xcc, + 0x80,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x80,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff, + 0x4f,0xcc,0x82,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x89,0x00,0x10,0x0a, + 0x01,0xff,0x4f,0xcc,0x82,0xcc,0x83,0x00,0x01,0xff,0x6f,0xcc,0x82,0xcc,0x83,0x00, + 0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0xa3,0xcc,0x82,0x00,0x01,0xff, + 0x6f,0xcc,0xa3,0xcc,0x82,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x81,0x00, + 0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x81,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc, + 0x9b,0xcc,0x80,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff, + 0x4f,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x89,0x00,0xd4,0x98, + 0xd3,0x48,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc,0x83,0x00, + 0x01,0xff,0x6f,0xcc,0x9b,0xcc,0x83,0x00,0x10,0x0a,0x01,0xff,0x4f,0xcc,0x9b,0xcc, + 0xa3,0x00,0x01,0xff,0x6f,0xcc,0x9b,0xcc,0xa3,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, + 0x55,0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff,0x55,0xcc, + 0x89,0x00,0x01,0xff,0x75,0xcc,0x89,0x00,0xd2,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff, + 0x55,0xcc,0x9b,0xcc,0x81,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x81,0x00,0x10,0x0a, + 0x01,0xff,0x55,0xcc,0x9b,0xcc,0x80,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0x80,0x00, + 0xd1,0x14,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x89,0x00,0x01,0xff,0x75,0xcc, + 0x9b,0xcc,0x89,0x00,0x10,0x0a,0x01,0xff,0x55,0xcc,0x9b,0xcc,0x83,0x00,0x01,0xff, + 
0x75,0xcc,0x9b,0xcc,0x83,0x00,0xd3,0x44,0xd2,0x24,0xd1,0x14,0x10,0x0a,0x01,0xff, + 0x55,0xcc,0x9b,0xcc,0xa3,0x00,0x01,0xff,0x75,0xcc,0x9b,0xcc,0xa3,0x00,0x10,0x08, + 0x01,0xff,0x59,0xcc,0x80,0x00,0x01,0xff,0x79,0xcc,0x80,0x00,0xd1,0x10,0x10,0x08, + 0x01,0xff,0x59,0xcc,0xa3,0x00,0x01,0xff,0x79,0xcc,0xa3,0x00,0x10,0x08,0x01,0xff, + 0x59,0xcc,0x89,0x00,0x01,0xff,0x79,0xcc,0x89,0x00,0x92,0x14,0x91,0x10,0x10,0x08, + 0x01,0xff,0x59,0xcc,0x83,0x00,0x01,0xff,0x79,0xcc,0x83,0x00,0x0a,0x00,0x0a,0x00, + 0xe1,0xc0,0x04,0xe0,0x80,0x02,0xcf,0x86,0xe5,0x2d,0x01,0xd4,0xa8,0xd3,0x54,0xd2, + 0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x93,0x00,0x01,0xff,0xce,0xb1, + 0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff, + 0xce,0xb1,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc, + 0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01, + 0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82, + 0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x91,0xcc,0x93,0x00,0x01,0xff, + 0xce,0x91,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80,0x00, + 0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce, + 0x91,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x81,0x00,0x10, + 0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x91,0xcc,0x94, + 0xcd,0x82,0x00,0xd3,0x42,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc, + 0x93,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb5,0xcc, + 0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10, + 0x0b,0x01,0xff,0xce,0xb5,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb5,0xcc,0x94, + 0xcc,0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x95,0xcc, + 0x93,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x95,0xcc, + 0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x94,0xcc,0x80,0x00,0x91,0x16,0x10, + 0x0b,0x01,0xff,0xce,0x95,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x95,0xcc,0x94, + 0xcc,0x81,0x00,0x00,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01, + 0xff,0xce,0xb7,0xcc,0x93,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0x00,0x10,0x0b,0x01, + 0xff,0xce,0xb7,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80, + 0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff, + 0xce,0xb7,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd, + 0x82,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28,0xd1,0x12,0x10, + 0x09,0x01,0xff,0xce,0x97,0xcc,0x93,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0x00,0x10, + 0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x94, + 0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81,0x00, + 0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc, + 0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x82,0x00,0xd3,0x54,0xd2, + 0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x93,0x00,0x01,0xff,0xce,0xb9, + 0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff, + 0xce,0xb9,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb9,0xcc, + 0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01, + 0xff,0xce,0xb9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9,0xcc,0x94,0xcd,0x82, + 0x00,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc,0x93,0x00,0x01,0xff, + 
0xce,0x99,0xcc,0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0x99,0xcc,0x93,0xcc,0x80,0x00, + 0x01,0xff,0xce,0x99,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce, + 0x99,0xcc,0x93,0xcc,0x81,0x00,0x01,0xff,0xce,0x99,0xcc,0x94,0xcc,0x81,0x00,0x10, + 0x0b,0x01,0xff,0xce,0x99,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0x99,0xcc,0x94, + 0xcd,0x82,0x00,0xcf,0x86,0xe5,0x13,0x01,0xd4,0x84,0xd3,0x42,0xd2,0x28,0xd1,0x12, + 0x10,0x09,0x01,0xff,0xce,0xbf,0xcc,0x93,0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0x00, + 0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc, + 0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0xbf,0xcc,0x93,0xcc,0x81, + 0x00,0x01,0xff,0xce,0xbf,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd2,0x28,0xd1,0x12, + 0x10,0x09,0x01,0xff,0xce,0x9f,0xcc,0x93,0x00,0x01,0xff,0xce,0x9f,0xcc,0x94,0x00, + 0x10,0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc, + 0x94,0xcc,0x80,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xce,0x9f,0xcc,0x93,0xcc,0x81, + 0x00,0x01,0xff,0xce,0x9f,0xcc,0x94,0xcc,0x81,0x00,0x00,0x00,0xd3,0x54,0xd2,0x28, + 0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x93,0x00,0x01,0xff,0xcf,0x85,0xcc, + 0x94,0x00,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf, + 0x85,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x93, + 0xcc,0x81,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff, + 0xcf,0x85,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x85,0xcc,0x94,0xcd,0x82,0x00, + 0xd2,0x1c,0xd1,0x0d,0x10,0x04,0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0x00,0x10, + 0x04,0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x0f,0x10,0x04, + 0x00,0x00,0x01,0xff,0xce,0xa5,0xcc,0x94,0xcc,0x81,0x00,0x10,0x04,0x00,0x00,0x01, + 0xff,0xce,0xa5,0xcc,0x94,0xcd,0x82,0x00,0xd4,0xa8,0xd3,0x54,0xd2,0x28,0xd1,0x12, + 0x10,0x09,0x01,0xff,0xcf,0x89,0xcc,0x93,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0x00, + 0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc, + 0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x81, + 0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89, + 0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0x00,0xd2,0x28, + 0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xa9,0xcc,0x93,0x00,0x01,0xff,0xce,0xa9,0xcc, + 0x94,0x00,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0x00,0x01,0xff,0xce, + 0xa9,0xcc,0x94,0xcc,0x80,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93, + 0xcc,0x81,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x81,0x00,0x10,0x0b,0x01,0xff, + 0xce,0xa9,0xcc,0x93,0xcd,0x82,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0x00, + 0xd3,0x48,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x80,0x00,0x01, + 0xff,0xce,0xb1,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb5,0xcc,0x80,0x00,0x01, + 0xff,0xce,0xb5,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb7,0xcc,0x80, + 0x00,0x01,0xff,0xce,0xb7,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcc,0x80, + 0x00,0x01,0xff,0xce,0xb9,0xcc,0x81,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff, + 0xce,0xbf,0xcc,0x80,0x00,0x01,0xff,0xce,0xbf,0xcc,0x81,0x00,0x10,0x09,0x01,0xff, + 0xcf,0x85,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85,0xcc,0x81,0x00,0x91,0x12,0x10,0x09, + 0x01,0xff,0xcf,0x89,0xcc,0x80,0x00,0x01,0xff,0xcf,0x89,0xcc,0x81,0x00,0x00,0x00, + 0xe0,0xe1,0x02,0xcf,0x86,0xe5,0x91,0x01,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16, + 0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc, + 
0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x80,0xcd,0x85, + 0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d, + 0x01,0xff,0xce,0xb1,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc, + 0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb1,0xcc,0x93,0xcd,0x82, + 0xcd,0x85,0x00,0x01,0xff,0xce,0xb1,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30, + 0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x91,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce, + 0x91,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x80, + 0xcd,0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a, + 0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce, + 0x91,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0x91,0xcc,0x93, + 0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x91,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00, + 0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcd,0x85, + 0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xb7, + 0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x80,0xcd, + 0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0xb7,0xcc,0x93,0xcc,0x81,0xcd,0x85, + 0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff, + 0xce,0xb7,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcc,0x94,0xcd, + 0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16,0x10,0x0b,0x01,0xff,0xce,0x97,0xcc,0x93, + 0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff, + 0xce,0x97,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc, + 0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff,0xce,0x97,0xcc,0x93,0xcc,0x81, + 0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc,0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d, + 0x01,0xff,0xce,0x97,0xcc,0x93,0xcd,0x82,0xcd,0x85,0x00,0x01,0xff,0xce,0x97,0xcc, + 0x94,0xcd,0x82,0xcd,0x85,0x00,0xd4,0xc8,0xd3,0x64,0xd2,0x30,0xd1,0x16,0x10,0x0b, + 0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd, + 0x85,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcc,0x80,0xcd,0x85,0x00,0x01, + 0xff,0xcf,0x89,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d,0x01,0xff, + 0xcf,0x89,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcc, + 0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xcf,0x89,0xcc,0x93,0xcd,0x82,0xcd,0x85, + 0x00,0x01,0xff,0xcf,0x89,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x30,0xd1,0x16, + 0x10,0x0b,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc, + 0x94,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x80,0xcd,0x85, + 0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcc,0x80,0xcd,0x85,0x00,0xd1,0x1a,0x10,0x0d, + 0x01,0xff,0xce,0xa9,0xcc,0x93,0xcc,0x81,0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc, + 0x94,0xcc,0x81,0xcd,0x85,0x00,0x10,0x0d,0x01,0xff,0xce,0xa9,0xcc,0x93,0xcd,0x82, + 0xcd,0x85,0x00,0x01,0xff,0xce,0xa9,0xcc,0x94,0xcd,0x82,0xcd,0x85,0x00,0xd3,0x49, + 0xd2,0x26,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0xb1,0xcc,0x86,0x00,0x01,0xff,0xce, + 0xb1,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x80,0xcd,0x85,0x00,0x01, + 0xff,0xce,0xb1,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xce,0xb1,0xcc,0x81, + 0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb1,0xcd,0x82,0x00,0x01,0xff, + 0xce,0xb1,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce, + 0x91,0xcc,0x86,0x00,0x01,0xff,0xce,0x91,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce, + 
0x91,0xcc,0x80,0x00,0x01,0xff,0xce,0x91,0xcc,0x81,0x00,0xd1,0x0d,0x10,0x09,0x01, + 0xff,0xce,0x91,0xcd,0x85,0x00,0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xb9,0x00,0x01, + 0x00,0xcf,0x86,0xe5,0x16,0x01,0xd4,0x8f,0xd3,0x44,0xd2,0x21,0xd1,0x0d,0x10,0x04, + 0x01,0x00,0x01,0xff,0xc2,0xa8,0xcd,0x82,0x00,0x10,0x0b,0x01,0xff,0xce,0xb7,0xcc, + 0x80,0xcd,0x85,0x00,0x01,0xff,0xce,0xb7,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01, + 0xff,0xce,0xb7,0xcc,0x81,0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb7, + 0xcd,0x82,0x00,0x01,0xff,0xce,0xb7,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12, + 0x10,0x09,0x01,0xff,0xce,0x95,0xcc,0x80,0x00,0x01,0xff,0xce,0x95,0xcc,0x81,0x00, + 0x10,0x09,0x01,0xff,0xce,0x97,0xcc,0x80,0x00,0x01,0xff,0xce,0x97,0xcc,0x81,0x00, + 0xd1,0x13,0x10,0x09,0x01,0xff,0xce,0x97,0xcd,0x85,0x00,0x01,0xff,0xe1,0xbe,0xbf, + 0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbe,0xbf,0xcc,0x81,0x00,0x01,0xff,0xe1, + 0xbe,0xbf,0xcd,0x82,0x00,0xd3,0x40,0xd2,0x28,0xd1,0x12,0x10,0x09,0x01,0xff,0xce, + 0xb9,0xcc,0x86,0x00,0x01,0xff,0xce,0xb9,0xcc,0x84,0x00,0x10,0x0b,0x01,0xff,0xce, + 0xb9,0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xce,0xb9,0xcc,0x88,0xcc,0x81,0x00,0x51, + 0x04,0x00,0x00,0x10,0x09,0x01,0xff,0xce,0xb9,0xcd,0x82,0x00,0x01,0xff,0xce,0xb9, + 0xcc,0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce,0x99,0xcc, + 0x86,0x00,0x01,0xff,0xce,0x99,0xcc,0x84,0x00,0x10,0x09,0x01,0xff,0xce,0x99,0xcc, + 0x80,0x00,0x01,0xff,0xce,0x99,0xcc,0x81,0x00,0xd1,0x0e,0x10,0x04,0x00,0x00,0x01, + 0xff,0xe1,0xbf,0xbe,0xcc,0x80,0x00,0x10,0x0a,0x01,0xff,0xe1,0xbf,0xbe,0xcc,0x81, + 0x00,0x01,0xff,0xe1,0xbf,0xbe,0xcd,0x82,0x00,0xd4,0x93,0xd3,0x4e,0xd2,0x28,0xd1, + 0x12,0x10,0x09,0x01,0xff,0xcf,0x85,0xcc,0x86,0x00,0x01,0xff,0xcf,0x85,0xcc,0x84, + 0x00,0x10,0x0b,0x01,0xff,0xcf,0x85,0xcc,0x88,0xcc,0x80,0x00,0x01,0xff,0xcf,0x85, + 0xcc,0x88,0xcc,0x81,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xcf,0x81,0xcc,0x93,0x00, + 0x01,0xff,0xcf,0x81,0xcc,0x94,0x00,0x10,0x09,0x01,0xff,0xcf,0x85,0xcd,0x82,0x00, + 0x01,0xff,0xcf,0x85,0xcc,0x88,0xcd,0x82,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01, + 0xff,0xce,0xa5,0xcc,0x86,0x00,0x01,0xff,0xce,0xa5,0xcc,0x84,0x00,0x10,0x09,0x01, + 0xff,0xce,0xa5,0xcc,0x80,0x00,0x01,0xff,0xce,0xa5,0xcc,0x81,0x00,0xd1,0x12,0x10, + 0x09,0x01,0xff,0xce,0xa1,0xcc,0x94,0x00,0x01,0xff,0xc2,0xa8,0xcc,0x80,0x00,0x10, + 0x09,0x01,0xff,0xc2,0xa8,0xcc,0x81,0x00,0x01,0xff,0x60,0x00,0xd3,0x3b,0xd2,0x18, + 0x51,0x04,0x00,0x00,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x80,0xcd,0x85,0x00,0x01, + 0xff,0xcf,0x89,0xcd,0x85,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xcf,0x89,0xcc,0x81, + 0xcd,0x85,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xcf,0x89,0xcd,0x82,0x00,0x01,0xff, + 0xcf,0x89,0xcd,0x82,0xcd,0x85,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xce, + 0x9f,0xcc,0x80,0x00,0x01,0xff,0xce,0x9f,0xcc,0x81,0x00,0x10,0x09,0x01,0xff,0xce, + 0xa9,0xcc,0x80,0x00,0x01,0xff,0xce,0xa9,0xcc,0x81,0x00,0xd1,0x10,0x10,0x09,0x01, + 0xff,0xce,0xa9,0xcd,0x85,0x00,0x01,0xff,0xc2,0xb4,0x00,0x10,0x04,0x01,0x00,0x00, + 0x00,0xe0,0x7e,0x0c,0xcf,0x86,0xe5,0xbb,0x08,0xe4,0x14,0x06,0xe3,0xf7,0x02,0xe2, + 0xbd,0x01,0xd1,0xd0,0xd0,0x4f,0xcf,0x86,0xd5,0x2e,0x94,0x2a,0xd3,0x18,0x92,0x14, + 0x91,0x10,0x10,0x08,0x01,0xff,0xe2,0x80,0x82,0x00,0x01,0xff,0xe2,0x80,0x83,0x00, + 0x01,0x00,0x01,0x00,0x92,0x0d,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff, + 0x00,0x01,0xff,0x00,0x01,0x00,0x94,0x1b,0x53,0x04,0x01,0x00,0xd2,0x09,0x11,0x04, + 0x01,0x00,0x01,0xff,0x00,0x51,0x05,0x01,0xff,0x00,0x10,0x05,0x01,0xff,0x00,0x04, + 
0x00,0x01,0x00,0xcf,0x86,0xd5,0x48,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x01,0x00,0x51, + 0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x52,0x04,0x04,0x00,0x11,0x04,0x04, + 0x00,0x06,0x00,0xd3,0x1c,0xd2,0x0c,0x51,0x04,0x06,0x00,0x10,0x04,0x06,0x00,0x07, + 0x00,0xd1,0x08,0x10,0x04,0x07,0x00,0x08,0x00,0x10,0x04,0x08,0x00,0x06,0x00,0x52, + 0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x06,0x00,0xd4,0x23,0xd3, + 0x14,0x52,0x05,0x06,0xff,0x00,0x91,0x0a,0x10,0x05,0x0a,0xff,0x00,0x00,0xff,0x00, + 0x0f,0xff,0x00,0x92,0x0a,0x11,0x05,0x0f,0xff,0x00,0x01,0xff,0x00,0x01,0xff,0x00, + 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x00,0x00,0x01,0x00, + 0x01,0x00,0xd0,0x7e,0xcf,0x86,0xd5,0x34,0xd4,0x14,0x53,0x04,0x01,0x00,0x52,0x04, + 0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xd3,0x10,0x52,0x04, + 0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0c,0x00,0x0c,0x00,0x52,0x04,0x0c,0x00, + 0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00, + 0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x02,0x00,0x91,0x08,0x10,0x04, + 0x03,0x00,0x04,0x00,0x04,0x00,0xd3,0x10,0xd2,0x08,0x11,0x04,0x06,0x00,0x08,0x00, + 0x11,0x04,0x08,0x00,0x0b,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00, + 0x10,0x04,0x0e,0x00,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x11,0x00,0x13,0x00, + 0xcf,0x86,0xd5,0x28,0x54,0x04,0x00,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x01,0xe6, + 0x01,0x01,0x01,0xe6,0xd2,0x0c,0x51,0x04,0x01,0x01,0x10,0x04,0x01,0x01,0x01,0xe6, + 0x91,0x08,0x10,0x04,0x01,0xe6,0x01,0x00,0x01,0x00,0xd4,0x30,0xd3,0x1c,0xd2,0x0c, + 0x91,0x08,0x10,0x04,0x01,0x00,0x01,0xe6,0x04,0x00,0xd1,0x08,0x10,0x04,0x06,0x00, + 0x06,0x01,0x10,0x04,0x06,0x01,0x06,0xe6,0x92,0x10,0xd1,0x08,0x10,0x04,0x06,0xdc, + 0x06,0xe6,0x10,0x04,0x06,0x01,0x08,0x01,0x09,0xdc,0x93,0x10,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x0a,0xe6,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x81,0xd0,0x4f, + 0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x29,0xd3,0x13,0x52,0x04,0x01,0x00,0x51,0x04, + 0x01,0x00,0x10,0x07,0x01,0xff,0xce,0xa9,0x00,0x01,0x00,0x92,0x12,0x51,0x04,0x01, + 0x00,0x10,0x06,0x01,0xff,0x4b,0x00,0x01,0xff,0x41,0xcc,0x8a,0x00,0x01,0x00,0x53, + 0x04,0x01,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04,0x04, + 0x00,0x07,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x06,0x00,0x06,0x00,0xcf,0x86,0x95, + 0x2c,0xd4,0x18,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0xd1,0x08,0x10,0x04,0x08, + 0x00,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x0b, + 0x00,0x10,0x04,0x0b,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x68,0xcf, + 0x86,0xd5,0x48,0xd4,0x28,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01, + 0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x92,0x0c,0x91, + 0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x11,0x00,0x00,0x00,0x53,0x04,0x01,0x00,0x92, + 0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86,0x90,0xcc,0xb8,0x00,0x01, + 0xff,0xe2,0x86,0x92,0xcc,0xb8,0x00,0x01,0x00,0x94,0x1a,0x53,0x04,0x01,0x00,0x52, + 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x86,0x94,0xcc,0xb8, + 0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x2e,0x94,0x2a,0x53,0x04,0x01,0x00,0x52, + 0x04,0x01,0x00,0xd1,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x87,0x90,0xcc,0xb8, + 0x00,0x10,0x0a,0x01,0xff,0xe2,0x87,0x94,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x87,0x92, + 0xcc,0xb8,0x00,0x01,0x00,0xd4,0x14,0x53,0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01, + 0x00,0x10,0x04,0x01,0x00,0x04,0x00,0x04,0x00,0x93,0x08,0x12,0x04,0x04,0x00,0x06, + 
0x00,0x06,0x00,0xe2,0x38,0x02,0xe1,0x3f,0x01,0xd0,0x68,0xcf,0x86,0xd5,0x3e,0x94, + 0x3a,0xd3,0x16,0x52,0x04,0x01,0x00,0x91,0x0e,0x10,0x0a,0x01,0xff,0xe2,0x88,0x83, + 0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0xd2,0x12,0x91,0x0e,0x10,0x04,0x01,0x00,0x01, + 0xff,0xe2,0x88,0x88,0xcc,0xb8,0x00,0x01,0x00,0x91,0x0e,0x10,0x0a,0x01,0xff,0xe2, + 0x88,0x8b,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x24,0x93,0x20,0x52, + 0x04,0x01,0x00,0xd1,0x0e,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa3,0xcc,0xb8,0x00,0x01, + 0x00,0x10,0x0a,0x01,0xff,0xe2,0x88,0xa5,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01, + 0x00,0xcf,0x86,0xd5,0x48,0x94,0x44,0xd3,0x2e,0xd2,0x12,0x91,0x0e,0x10,0x04,0x01, + 0x00,0x01,0xff,0xe2,0x88,0xbc,0xcc,0xb8,0x00,0x01,0x00,0xd1,0x0e,0x10,0x0a,0x01, + 0xff,0xe2,0x89,0x83,0xcc,0xb8,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe2, + 0x89,0x85,0xcc,0xb8,0x00,0x92,0x12,0x91,0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2, + 0x89,0x88,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x40,0xd3,0x1e,0x92, + 0x1a,0xd1,0x0c,0x10,0x08,0x01,0xff,0x3d,0xcc,0xb8,0x00,0x01,0x00,0x10,0x0a,0x01, + 0xff,0xe2,0x89,0xa1,0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1, + 0x0e,0x10,0x04,0x01,0x00,0x01,0xff,0xe2,0x89,0x8d,0xcc,0xb8,0x00,0x10,0x08,0x01, + 0xff,0x3c,0xcc,0xb8,0x00,0x01,0xff,0x3e,0xcc,0xb8,0x00,0xd3,0x30,0xd2,0x18,0x91, + 0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xa4,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xa5, + 0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xb2,0xcc,0xb8, + 0x00,0x01,0xff,0xe2,0x89,0xb3,0xcc,0xb8,0x00,0x01,0x00,0x92,0x18,0x91,0x14,0x10, + 0x0a,0x01,0xff,0xe2,0x89,0xb6,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xb7,0xcc,0xb8, + 0x00,0x01,0x00,0x01,0x00,0xd0,0x86,0xcf,0x86,0xd5,0x50,0x94,0x4c,0xd3,0x30,0xd2, + 0x18,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xba,0xcc,0xb8,0x00,0x01,0xff,0xe2, + 0x89,0xbb,0xcc,0xb8,0x00,0x01,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0x82, + 0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x83,0xcc,0xb8,0x00,0x01,0x00,0x92,0x18,0x91, + 0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0x86,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x87, + 0xcc,0xb8,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x30,0x53,0x04,0x01,0x00,0x52, + 0x04,0x01,0x00,0xd1,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xa2,0xcc,0xb8,0x00,0x01, + 0xff,0xe2,0x8a,0xa8,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xa9,0xcc,0xb8, + 0x00,0x01,0xff,0xe2,0x8a,0xab,0xcc,0xb8,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01, + 0x00,0xd4,0x5c,0xd3,0x2c,0x92,0x28,0xd1,0x14,0x10,0x0a,0x01,0xff,0xe2,0x89,0xbc, + 0xcc,0xb8,0x00,0x01,0xff,0xe2,0x89,0xbd,0xcc,0xb8,0x00,0x10,0x0a,0x01,0xff,0xe2, + 0x8a,0x91,0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0x92,0xcc,0xb8,0x00,0x01,0x00,0xd2, + 0x18,0x51,0x04,0x01,0x00,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xb2,0xcc,0xb8,0x00,0x01, + 0xff,0xe2,0x8a,0xb3,0xcc,0xb8,0x00,0x91,0x14,0x10,0x0a,0x01,0xff,0xe2,0x8a,0xb4, + 0xcc,0xb8,0x00,0x01,0xff,0xe2,0x8a,0xb5,0xcc,0xb8,0x00,0x01,0x00,0x93,0x0c,0x92, + 0x08,0x11,0x04,0x01,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xd1,0x64,0xd0,0x3e,0xcf, + 0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01,0x00,0x04, + 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x94,0x20,0x53,0x04,0x01,0x00,0x92, + 0x18,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x80,0x88,0x00,0x10,0x08,0x01, + 0xff,0xe3,0x80,0x89,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01, + 0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10, + 0x04,0x01,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x04,0x00,0x04,0x00,0xd0, + 
0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c,0x51, + 0x04,0x04,0x00,0x10,0x04,0x04,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xcf,0x86,0xd5, + 0x2c,0xd4,0x14,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04,0x06,0x00,0x10, + 0x04,0x06,0x00,0x07,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x08, + 0x00,0x08,0x00,0x08,0x00,0x12,0x04,0x08,0x00,0x09,0x00,0xd4,0x14,0x53,0x04,0x09, + 0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0xd3, + 0x08,0x12,0x04,0x0c,0x00,0x10,0x00,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10, + 0x00,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x13,0x00,0xd3,0xa6,0xd2, + 0x74,0xd1,0x40,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x18,0x93,0x14,0x52, + 0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x04,0x00,0x10,0x04,0x04,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x01,0x00,0x92, + 0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x01, + 0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x14,0x53, + 0x04,0x01,0x00,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x06, + 0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x51,0x04,0x06,0x00,0x10,0x04,0x06, + 0x00,0x07,0x00,0xd1,0x06,0xcf,0x06,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x54, + 0x04,0x01,0x00,0x93,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x06,0x00,0x06, + 0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x13,0x04,0x04, + 0x00,0x06,0x00,0xd2,0xdc,0xd1,0x48,0xd0,0x26,0xcf,0x86,0x95,0x20,0x54,0x04,0x01, + 0x00,0xd3,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x07,0x00,0x06,0x00,0x92,0x0c,0x91, + 0x08,0x10,0x04,0x08,0x00,0x04,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55, + 0x04,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x04,0x00,0x06, + 0x00,0x06,0x00,0x52,0x04,0x06,0x00,0x11,0x04,0x06,0x00,0x08,0x00,0xd0,0x5e,0xcf, + 0x86,0xd5,0x2c,0xd4,0x10,0x53,0x04,0x06,0x00,0x92,0x08,0x11,0x04,0x06,0x00,0x07, + 0x00,0x07,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0x52, + 0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x08,0x00,0x0a,0x00,0x0b,0x00,0xd4,0x10,0x93, + 0x0c,0x92,0x08,0x11,0x04,0x07,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0xd3,0x10,0x92, + 0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x52,0x04,0x0a, + 0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x1c,0x94, + 0x18,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b, + 0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0b,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51, + 0x04,0x0b,0x00,0x10,0x04,0x0c,0x00,0x0b,0x00,0x0c,0x00,0x0b,0x00,0x0b,0x00,0xd1, + 0xa8,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10, + 0x04,0x10,0x00,0x01,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x0c,0x00,0x01, + 0x00,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x01,0x00,0x01,0x00,0x94,0x14,0x53, + 0x04,0x01,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x01,0x00,0x01, + 0x00,0x01,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x18,0x53,0x04,0x01,0x00,0x52,0x04,0x01, + 0x00,0xd1,0x08,0x10,0x04,0x0c,0x00,0x01,0x00,0x10,0x04,0x0c,0x00,0x01,0x00,0xd3, + 0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x0c,0x00,0x51,0x04,0x0c, + 0x00,0x10,0x04,0x01,0x00,0x0b,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10, + 0x04,0x01,0x00,0x0c,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c, + 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x06,0x00,0x93,0x0c,0x52,0x04,0x06,0x00,0x11, + 
0x04,0x06,0x00,0x01,0x00,0x01,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x54,0x04,0x01, + 0x00,0x93,0x10,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x0c,0x00,0x0c, + 0x00,0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c, + 0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10, + 0x04,0x01,0x00,0x0c,0x00,0xcf,0x86,0xd5,0x2c,0x94,0x28,0xd3,0x10,0x52,0x04,0x08, + 0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x09,0x00,0xd2,0x0c,0x51,0x04,0x09, + 0x00,0x10,0x04,0x09,0x00,0x0d,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0d,0x00,0x0c, + 0x00,0x06,0x00,0x94,0x0c,0x53,0x04,0x06,0x00,0x12,0x04,0x06,0x00,0x0a,0x00,0x06, + 0x00,0xe4,0x39,0x01,0xd3,0x0c,0xd2,0x06,0xcf,0x06,0x04,0x00,0xcf,0x06,0x06,0x00, + 0xd2,0x30,0xd1,0x06,0xcf,0x06,0x06,0x00,0xd0,0x06,0xcf,0x06,0x06,0x00,0xcf,0x86, + 0x95,0x1e,0x54,0x04,0x06,0x00,0x53,0x04,0x06,0x00,0x52,0x04,0x06,0x00,0x91,0x0e, + 0x10,0x0a,0x06,0xff,0xe2,0xab,0x9d,0xcc,0xb8,0x00,0x06,0x00,0x06,0x00,0x06,0x00, + 0xd1,0x80,0xd0,0x3a,0xcf,0x86,0xd5,0x28,0xd4,0x10,0x53,0x04,0x07,0x00,0x52,0x04, + 0x07,0x00,0x11,0x04,0x07,0x00,0x08,0x00,0xd3,0x08,0x12,0x04,0x08,0x00,0x09,0x00, + 0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x94,0x0c, + 0x93,0x08,0x12,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x30, + 0xd4,0x14,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00, + 0x10,0x00,0x10,0x00,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00, + 0x0b,0x00,0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x10,0x00,0x10,0x00,0x54,0x04, + 0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x10,0x00, + 0xd0,0x32,0xcf,0x86,0xd5,0x14,0x54,0x04,0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00, + 0x11,0x04,0x10,0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00, + 0xd2,0x08,0x11,0x04,0x10,0x00,0x14,0x00,0x91,0x08,0x10,0x04,0x14,0x00,0x10,0x00, + 0x10,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x10,0x00,0x15,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x92,0x0c,0x51,0x04, + 0x10,0x00,0x10,0x04,0x13,0x00,0x14,0x00,0x14,0x00,0x14,0x00,0xd4,0x0c,0x53,0x04, + 0x14,0x00,0x12,0x04,0x14,0x00,0x11,0x00,0x53,0x04,0x14,0x00,0x52,0x04,0x14,0x00, + 0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0xe3,0xb9,0x01,0xd2,0xac,0xd1, + 0x68,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x08,0x00,0x94,0x14,0x53,0x04,0x08,0x00,0x52, + 0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x08,0x00,0xcf, + 0x86,0xd5,0x18,0x54,0x04,0x08,0x00,0x53,0x04,0x08,0x00,0x52,0x04,0x08,0x00,0x51, + 0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0xd4,0x14,0x53,0x04,0x09,0x00,0x52, + 0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0xd3,0x10,0x92, + 0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x0a,0x00,0x0a,0x00,0x09,0x00,0x52,0x04,0x0a, + 0x00,0x11,0x04,0x0a,0x00,0x0b,0x00,0xd0,0x06,0xcf,0x06,0x08,0x00,0xcf,0x86,0x55, + 0x04,0x08,0x00,0xd4,0x1c,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51,0x04,0x08,0x00,0x10, + 0x04,0x08,0x00,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xe6,0xd3, + 0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0d,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10, + 0x04,0x00,0x00,0x08,0x00,0x08,0x00,0x08,0x00,0xd1,0x6c,0xd0,0x2a,0xcf,0x86,0x55, + 0x04,0x08,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10, + 0x04,0x00,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x0d, + 0x00,0x00,0x00,0x08,0x00,0xcf,0x86,0x55,0x04,0x08,0x00,0xd4,0x1c,0xd3,0x0c,0x52, + 
0x04,0x08,0x00,0x11,0x04,0x08,0x00,0x0d,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00, + 0x00,0x10,0x04,0x00,0x00,0x08,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10, + 0x04,0x00,0x00,0x0c,0x09,0xd0,0x5a,0xcf,0x86,0xd5,0x18,0x54,0x04,0x08,0x00,0x93, + 0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x00, + 0x00,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08, + 0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00, + 0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00, + 0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00,0x00,0xcf, + 0x86,0x95,0x40,0xd4,0x20,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10, + 0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08, + 0x00,0x00,0x00,0xd3,0x10,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08, + 0x00,0x00,0x00,0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x00, + 0x00,0x0a,0xe6,0xd2,0x9c,0xd1,0x68,0xd0,0x32,0xcf,0x86,0xd5,0x14,0x54,0x04,0x08, + 0x00,0x53,0x04,0x08,0x00,0x52,0x04,0x0a,0x00,0x11,0x04,0x08,0x00,0x0a,0x00,0x54, + 0x04,0x0a,0x00,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0a,0x00,0x0b,0x00,0x0d, + 0x00,0x0d,0x00,0x12,0x04,0x0d,0x00,0x10,0x00,0xcf,0x86,0x95,0x30,0x94,0x2c,0xd3, + 0x18,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x12,0x00,0x91,0x08,0x10, + 0x04,0x12,0x00,0x13,0x00,0x13,0x00,0xd2,0x08,0x11,0x04,0x13,0x00,0x14,0x00,0x51, + 0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x15,0x00,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf, + 0x86,0x95,0x18,0x54,0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c,0x51,0x04,0x04, + 0x00,0x10,0x04,0x00,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86,0x55,0x04,0x04, + 0x00,0x54,0x04,0x04,0x00,0x93,0x08,0x12,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0xd1, + 0x06,0xcf,0x06,0x04,0x00,0xd0,0x06,0xcf,0x06,0x04,0x00,0xcf,0x86,0xd5,0x14,0x54, + 0x04,0x04,0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04,0x00,0x00,0x00,0x00, + 0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x04,0x00,0x12,0x04,0x04,0x00,0x00,0x00,0xcf, + 0x86,0xe5,0xa6,0x05,0xe4,0x9f,0x05,0xe3,0x96,0x04,0xe2,0xe4,0x03,0xe1,0xc0,0x01, + 0xd0,0x3e,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x1c,0x53,0x04,0x01,0x00,0xd2,0x0c, + 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0xda,0x01,0xe4,0x91,0x08,0x10,0x04,0x01,0xe8, + 0x01,0xde,0x01,0xe0,0x53,0x04,0x01,0x00,0xd2,0x0c,0x51,0x04,0x04,0x00,0x10,0x04, + 0x04,0x00,0x06,0x00,0x51,0x04,0x06,0x00,0x10,0x04,0x04,0x00,0x01,0x00,0xcf,0x86, + 0xd5,0xaa,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00, + 0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81, + 0x8b,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x8d,0xe3,0x82, + 0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81, + 0x8f,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x91,0xe3,0x82, + 0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x93,0xe3,0x82,0x99, + 0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x95,0xe3,0x82,0x99,0x00,0x01,0x00, + 0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x97,0xe3,0x82,0x99,0x00,0x01, + 0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0x99,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f, + 0x10,0x0b,0x01,0xff,0xe3,0x81,0x9b,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01, + 0xff,0xe3,0x81,0x9d,0xe3,0x82,0x99,0x00,0x01,0x00,0xd4,0x53,0xd3,0x3c,0xd2,0x1e, + 
0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0x9f,0xe3,0x82,0x99,0x00,0x01,0x00,0x10, + 0x0b,0x01,0xff,0xe3,0x81,0xa1,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x04, + 0x01,0x00,0x01,0xff,0xe3,0x81,0xa4,0xe3,0x82,0x99,0x00,0x10,0x04,0x01,0x00,0x01, + 0xff,0xe3,0x81,0xa6,0xe3,0x82,0x99,0x00,0x92,0x13,0x91,0x0f,0x10,0x04,0x01,0x00, + 0x01,0xff,0xe3,0x81,0xa8,0xe3,0x82,0x99,0x00,0x01,0x00,0x01,0x00,0xd3,0x4a,0xd2, + 0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81,0xaf,0xe3,0x82,0x99,0x00,0x01,0xff, + 0xe3,0x81,0xaf,0xe3,0x82,0x9a,0x00,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x81,0xb2, + 0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb2,0xe3,0x82,0x9a, + 0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb5,0xe3,0x82,0x99,0x00,0x01,0xff, + 0xe3,0x81,0xb5,0xe3,0x82,0x9a,0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01, + 0xff,0xe3,0x81,0xb8,0xe3,0x82,0x99,0x00,0x10,0x0b,0x01,0xff,0xe3,0x81,0xb8,0xe3, + 0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10,0x0b,0x01,0xff,0xe3,0x81,0xbb,0xe3,0x82, + 0x99,0x00,0x01,0xff,0xe3,0x81,0xbb,0xe3,0x82,0x9a,0x00,0x01,0x00,0xd0,0xee,0xcf, + 0x86,0xd5,0x42,0x54,0x04,0x01,0x00,0xd3,0x1b,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10, + 0x0b,0x01,0xff,0xe3,0x81,0x86,0xe3,0x82,0x99,0x00,0x06,0x00,0x10,0x04,0x06,0x00, + 0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x00,0x00,0x01,0x08,0x10,0x04,0x01,0x08, + 0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0x9d,0xe3,0x82,0x99, + 0x00,0x06,0x00,0xd4,0x32,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x01, + 0x00,0x01,0x00,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3, + 0x82,0xab,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xad,0xe3, + 0x82,0x99,0x00,0x01,0x00,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3, + 0x82,0xaf,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb1,0xe3, + 0x82,0x99,0x00,0x01,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb3,0xe3,0x82, + 0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb5,0xe3,0x82,0x99,0x00,0x01, + 0x00,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb7,0xe3,0x82,0x99,0x00, + 0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x82,0xb9,0xe3,0x82,0x99,0x00,0x01,0x00,0xd1, + 0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbb,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x0b, + 0x01,0xff,0xe3,0x82,0xbd,0xe3,0x82,0x99,0x00,0x01,0x00,0xcf,0x86,0xd5,0xd5,0xd4, + 0x53,0xd3,0x3c,0xd2,0x1e,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3,0x82,0xbf,0xe3,0x82, + 0x99,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0x81,0xe3,0x82,0x99,0x00,0x01, + 0x00,0xd1,0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x84,0xe3,0x82,0x99,0x00, + 0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x86,0xe3,0x82,0x99,0x00,0x92,0x13,0x91, + 0x0f,0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x88,0xe3,0x82,0x99,0x00,0x01,0x00, + 0x01,0x00,0xd3,0x4a,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff,0xe3,0x83,0x8f,0xe3, + 0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x8f,0xe3,0x82,0x9a,0x00,0x10,0x04,0x01,0x00, + 0x01,0xff,0xe3,0x83,0x92,0xe3,0x82,0x99,0x00,0xd1,0x0f,0x10,0x0b,0x01,0xff,0xe3, + 0x83,0x92,0xe3,0x82,0x9a,0x00,0x01,0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0x95,0xe3, + 0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x95,0xe3,0x82,0x9a,0x00,0xd2,0x1e,0xd1,0x0f, + 0x10,0x04,0x01,0x00,0x01,0xff,0xe3,0x83,0x98,0xe3,0x82,0x99,0x00,0x10,0x0b,0x01, + 0xff,0xe3,0x83,0x98,0xe3,0x82,0x9a,0x00,0x01,0x00,0x91,0x16,0x10,0x0b,0x01,0xff, + 0xe3,0x83,0x9b,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0x9b,0xe3,0x82,0x9a,0x00, + 0x01,0x00,0x54,0x04,0x01,0x00,0xd3,0x22,0x52,0x04,0x01,0x00,0xd1,0x0f,0x10,0x0b, + 
0x01,0xff,0xe3,0x82,0xa6,0xe3,0x82,0x99,0x00,0x01,0x00,0x10,0x04,0x01,0x00,0x01, + 0xff,0xe3,0x83,0xaf,0xe3,0x82,0x99,0x00,0xd2,0x25,0xd1,0x16,0x10,0x0b,0x01,0xff, + 0xe3,0x83,0xb0,0xe3,0x82,0x99,0x00,0x01,0xff,0xe3,0x83,0xb1,0xe3,0x82,0x99,0x00, + 0x10,0x0b,0x01,0xff,0xe3,0x83,0xb2,0xe3,0x82,0x99,0x00,0x01,0x00,0x51,0x04,0x01, + 0x00,0x10,0x0b,0x01,0xff,0xe3,0x83,0xbd,0xe3,0x82,0x99,0x00,0x06,0x00,0xd1,0x65, + 0xd0,0x46,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x91,0x08, + 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd4,0x18,0x53,0x04, + 0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x0a,0x00,0x10,0x04, + 0x13,0x00,0x14,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00, + 0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x94,0x15,0x93,0x11, + 0x52,0x04,0x01,0x00,0x91,0x09,0x10,0x05,0x01,0xff,0x00,0x01,0x00,0x01,0x00,0x01, + 0x00,0x01,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x01,0x00,0x52, + 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x54, + 0x04,0x04,0x00,0x53,0x04,0x04,0x00,0x92,0x0c,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c, + 0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04,0x08,0x00,0x0a,0x00,0x94, + 0x0c,0x93,0x08,0x12,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x06,0x00,0xd2,0xa4,0xd1, + 0x5c,0xd0,0x22,0xcf,0x86,0x95,0x1c,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52, + 0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x07,0x00,0x10,0x04,0x07,0x00,0x00, + 0x00,0x01,0x00,0xcf,0x86,0xd5,0x20,0xd4,0x0c,0x93,0x08,0x12,0x04,0x01,0x00,0x0b, + 0x00,0x0b,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x06,0x00,0x06, + 0x00,0x06,0x00,0x06,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01, + 0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x08,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86,0x55, + 0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x01, + 0x00,0x06,0x00,0x06,0x00,0x06,0x00,0x06,0x00,0xcf,0x86,0xd5,0x10,0x94,0x0c,0x53, + 0x04,0x01,0x00,0x12,0x04,0x01,0x00,0x07,0x00,0x01,0x00,0x54,0x04,0x01,0x00,0x53, + 0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x16, + 0x00,0xd1,0x30,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54, + 0x04,0x01,0x00,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01, + 0x00,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x01,0x00,0x01, + 0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x01,0x00,0x53, + 0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x07,0x00,0x54,0x04,0x01, + 0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01, + 0x00,0x07,0x00,0xcf,0x06,0x04,0x00,0xcf,0x06,0x04,0x00,0xd1,0x48,0xd0,0x40,0xcf, + 0x86,0xd5,0x06,0xcf,0x06,0x04,0x00,0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x2c,0xd2, + 0x06,0xcf,0x06,0x04,0x00,0xd1,0x06,0xcf,0x06,0x04,0x00,0xd0,0x1a,0xcf,0x86,0x55, + 0x04,0x04,0x00,0x54,0x04,0x04,0x00,0x93,0x0c,0x52,0x04,0x04,0x00,0x11,0x04,0x04, + 0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x07,0x00,0xcf,0x06,0x01,0x00,0xcf,0x86,0xcf, + 0x06,0x01,0x00,0xcf,0x86,0xcf,0x06,0x01,0x00,0xe2,0x71,0x05,0xd1,0x8c,0xd0,0x08, + 0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xd4,0x06, + 0xcf,0x06,0x01,0x00,0xd3,0x06,0xcf,0x06,0x01,0x00,0xd2,0x06,0xcf,0x06,0x01,0x00, + 0xd1,0x06,0xcf,0x06,0x01,0x00,0xd0,0x22,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x10, + 0x93,0x0c,0x52,0x04,0x01,0x00,0x11,0x04,0x01,0x00,0x08,0x00,0x08,0x00,0x53,0x04, + 
0x08,0x00,0x12,0x04,0x08,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0xd3,0x08, + 0x12,0x04,0x0a,0x00,0x0b,0x00,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0d,0x00, + 0x11,0x00,0x11,0x00,0x93,0x0c,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x13,0x00, + 0x13,0x00,0x94,0x14,0x53,0x04,0x13,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10,0x04, + 0x13,0x00,0x14,0x00,0x14,0x00,0x00,0x00,0xe0,0xdb,0x04,0xcf,0x86,0xe5,0xdf,0x01, + 0xd4,0x06,0xcf,0x06,0x04,0x00,0xd3,0x74,0xd2,0x6e,0xd1,0x06,0xcf,0x06,0x04,0x00, + 0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x04,0x00,0x52,0x04,0x04,0x00, + 0x91,0x08,0x10,0x04,0x04,0x00,0x00,0x00,0x00,0x00,0x04,0x00,0xd4,0x10,0x93,0x0c, + 0x92,0x08,0x11,0x04,0x04,0x00,0x06,0x00,0x04,0x00,0x04,0x00,0x93,0x10,0x52,0x04, + 0x04,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x04,0x00,0x04,0x00,0x04,0x00,0xcf,0x86, + 0x95,0x24,0x94,0x20,0x93,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x04,0x00,0x06,0x00, + 0x04,0x00,0xd1,0x08,0x10,0x04,0x04,0x00,0x06,0x00,0x10,0x04,0x04,0x00,0x00,0x00, + 0x00,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x06,0x0a,0x00,0xd2,0x84,0xd1,0x4c,0xd0,0x16, + 0xcf,0x86,0x55,0x04,0x0a,0x00,0x94,0x0c,0x53,0x04,0x0a,0x00,0x12,0x04,0x0a,0x00, + 0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x0a,0x00,0xd4,0x1c,0xd3,0x0c,0x92,0x08, + 0x11,0x04,0x0c,0x00,0x0a,0x00,0x0a,0x00,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00, + 0x10,0x04,0x0a,0x00,0x0a,0xe6,0xd3,0x08,0x12,0x04,0x0a,0x00,0x0d,0xe6,0x52,0x04, + 0x0d,0xe6,0x11,0x04,0x0a,0xe6,0x0a,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04, + 0x0a,0x00,0x53,0x04,0x0a,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04, + 0x11,0xe6,0x0d,0xe6,0x0b,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00, + 0x93,0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x00,0x00,0xd1,0x40, + 0xd0,0x3a,0xcf,0x86,0xd5,0x24,0x54,0x04,0x08,0x00,0xd3,0x10,0x52,0x04,0x08,0x00, + 0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x09,0x00,0x92,0x0c,0x51,0x04,0x09,0x00, + 0x10,0x04,0x09,0x00,0x0a,0x00,0x0a,0x00,0x94,0x10,0x93,0x0c,0x92,0x08,0x11,0x04, + 0x09,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0x0a,0x00,0xcf,0x06,0x0a,0x00,0xd0,0x5e, + 0xcf,0x86,0xd5,0x28,0xd4,0x18,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0xd1,0x08, + 0x10,0x04,0x0a,0x00,0x0c,0x00,0x10,0x04,0x0c,0x00,0x11,0x00,0x93,0x0c,0x92,0x08, + 0x11,0x04,0x0c,0x00,0x0d,0x00,0x10,0x00,0x10,0x00,0xd4,0x1c,0x53,0x04,0x0c,0x00, + 0xd2,0x0c,0x51,0x04,0x0c,0x00,0x10,0x04,0x0d,0x00,0x10,0x00,0x51,0x04,0x10,0x00, + 0x10,0x04,0x12,0x00,0x14,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x10,0x00,0x11,0x00, + 0x11,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x1c, + 0x94,0x18,0x93,0x14,0xd2,0x08,0x11,0x04,0x00,0x00,0x15,0x00,0x51,0x04,0x15,0x00, + 0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x54,0x04,0x00,0x00,0xd3,0x10, + 0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x92,0x0c, + 0x51,0x04,0x0d,0x00,0x10,0x04,0x0c,0x00,0x0a,0x00,0x0a,0x00,0xe4,0xf2,0x02,0xe3, + 0x65,0x01,0xd2,0x98,0xd1,0x48,0xd0,0x36,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10, + 0x52,0x04,0x08,0x00,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x09,0x08,0x00,0x08,0x00, + 0x08,0x00,0xd4,0x0c,0x53,0x04,0x08,0x00,0x12,0x04,0x08,0x00,0x00,0x00,0x53,0x04, + 0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04, + 0x09,0x00,0x54,0x04,0x09,0x00,0x13,0x04,0x09,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06, + 0x0a,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x1c,0xd3,0x10,0x52,0x04,0x0a,0x00,0x91,0x08, + 0x10,0x04,0x0a,0x09,0x12,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00, + 
0x0a,0x00,0x53,0x04,0x0a,0x00,0x92,0x08,0x11,0x04,0x0a,0x00,0x00,0x00,0x00,0x00, + 0x54,0x04,0x0b,0xe6,0xd3,0x0c,0x92,0x08,0x11,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00, + 0x52,0x04,0x0b,0x00,0x11,0x04,0x11,0x00,0x14,0x00,0xd1,0x60,0xd0,0x22,0xcf,0x86, + 0x55,0x04,0x0a,0x00,0x94,0x18,0x53,0x04,0x0a,0x00,0xd2,0x0c,0x51,0x04,0x0a,0x00, + 0x10,0x04,0x0a,0x00,0x0a,0xdc,0x11,0x04,0x0a,0xdc,0x0a,0x00,0x0a,0x00,0xcf,0x86, + 0xd5,0x24,0x54,0x04,0x0a,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x0a,0x00,0x10,0x04, + 0x0a,0x00,0x0a,0x09,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04, + 0x00,0x00,0x0a,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00, + 0x91,0x08,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04, + 0x0b,0x00,0x54,0x04,0x0b,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04, + 0x0b,0x00,0x0b,0x07,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x34,0xd4,0x20,0xd3,0x10, + 0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x09,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x52,0x04, + 0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x53,0x04,0x0b,0x00, + 0xd2,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x0b,0x00,0x54,0x04, + 0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04, + 0x10,0x00,0x00,0x00,0xd2,0xd0,0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0a,0x00, + 0x54,0x04,0x0a,0x00,0x93,0x10,0x52,0x04,0x0a,0x00,0x51,0x04,0x0a,0x00,0x10,0x04, + 0x0a,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x20,0xd4,0x10,0x53,0x04,0x0a,0x00, + 0x52,0x04,0x0a,0x00,0x11,0x04,0x0a,0x00,0x00,0x00,0x53,0x04,0x0a,0x00,0x92,0x08, + 0x11,0x04,0x0a,0x00,0x00,0x00,0x0a,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00, + 0x12,0x04,0x0b,0x00,0x10,0x00,0xd0,0x3a,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04, + 0x0b,0x00,0xd3,0x1c,0xd2,0x0c,0x91,0x08,0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0xe6, + 0xd1,0x08,0x10,0x04,0x0b,0xdc,0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xe6,0xd2,0x0c, + 0x91,0x08,0x10,0x04,0x0b,0xe6,0x0b,0x00,0x0b,0x00,0x11,0x04,0x0b,0x00,0x0b,0xe6, + 0xcf,0x86,0xd5,0x2c,0xd4,0x18,0x93,0x14,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00, + 0x0b,0xe6,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x00,0x00, + 0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x54,0x04, + 0x0d,0x00,0x93,0x10,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x09, + 0x00,0x00,0x00,0x00,0xd1,0x8c,0xd0,0x72,0xcf,0x86,0xd5,0x4c,0xd4,0x30,0xd3,0x18, 0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00, - 0x10,0x04,0x0c,0x00,0x00,0x00,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00, - 0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00, - 0x94,0x20,0xd3,0x10,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00, - 0x00,0x00,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00, - 0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x10,0x93,0x0c,0x52,0x04,0x11,0x00, - 0x11,0x04,0x10,0x00,0x15,0x00,0x00,0x00,0x11,0x00,0xd0,0x06,0xcf,0x06,0x11,0x00, - 0xcf,0x86,0x55,0x04,0x0b,0x00,0xd4,0x14,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00, - 0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0x09,0x00,0x00,0x53,0x04,0x0b,0x00,0x92,0x08, - 0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x02,0xff,0xff,0xcf,0x86,0xcf, - 0x06,0x02,0xff,0xff,0xd1,0x76,0xd0,0x09,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xcf, - 0x86,0x85,0xd4,0x07,0xcf,0x06,0x02,0xff,0xff,0xd3,0x07,0xcf,0x06,0x02,0xff,0xff, - 0xd2,0x07,0xcf,0x06,0x02,0xff,0xff,0xd1,0x07,0xcf,0x06,0x02,0xff,0xff,0xd0,0x18, - 
0xcf,0x86,0x55,0x05,0x02,0xff,0xff,0x94,0x0d,0x93,0x09,0x12,0x05,0x02,0xff,0xff, - 0x00,0x00,0x00,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3,0x10,0x52,0x04, - 0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x92,0x0c,0x51,0x04, - 0x00,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0x54,0x04,0x0b,0x00, - 0x53,0x04,0x0b,0x00,0x12,0x04,0x0b,0x00,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06, - 0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00,0xe4,0x9c,0x10,0xe3,0x16,0x08, - 0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x08,0x04,0xe0,0x04,0x02,0xcf,0x86,0xe5,0x01, - 0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xb1,0x88, - 0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0x10,0x08,0x01,0xff,0xe8,0xbb,0x8a,0x00,0x01, - 0xff,0xe8,0xb3,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xbb,0x91,0x00,0x01, - 0xff,0xe4,0xb8,0xb2,0x00,0x10,0x08,0x01,0xff,0xe5,0x8f,0xa5,0x00,0x01,0xff,0xe9, - 0xbe,0x9c,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xbe,0x9c,0x00,0x01, - 0xff,0xe5,0xa5,0x91,0x00,0x10,0x08,0x01,0xff,0xe9,0x87,0x91,0x00,0x01,0xff,0xe5, - 0x96,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xa5,0x88,0x00,0x01,0xff,0xe6, - 0x87,0xb6,0x00,0x10,0x08,0x01,0xff,0xe7,0x99,0xa9,0x00,0x01,0xff,0xe7,0xbe,0x85, - 0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x98,0xbf,0x00,0x01, - 0xff,0xe8,0x9e,0xba,0x00,0x10,0x08,0x01,0xff,0xe8,0xa3,0xb8,0x00,0x01,0xff,0xe9, - 0x82,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8,0x82,0x00,0x01,0xff,0xe6, - 0xb4,0x9b,0x00,0x10,0x08,0x01,0xff,0xe7,0x83,0x99,0x00,0x01,0xff,0xe7,0x8f,0x9e, - 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x90,0xbd,0x00,0x01,0xff,0xe9, - 0x85,0xaa,0x00,0x10,0x08,0x01,0xff,0xe9,0xa7,0xb1,0x00,0x01,0xff,0xe4,0xba,0x82, - 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x8d,0xb5,0x00,0x01,0xff,0xe6,0xac,0x84, - 0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x9b,0x00,0x01,0xff,0xe8,0x98,0xad,0x00,0xd4, - 0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xb8,0x9e,0x00,0x01, - 0xff,0xe5,0xb5,0x90,0x00,0x10,0x08,0x01,0xff,0xe6,0xbf,0xab,0x00,0x01,0xff,0xe8, - 0x97,0x8d,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xa5,0xa4,0x00,0x01,0xff,0xe6, - 0x8b,0x89,0x00,0x10,0x08,0x01,0xff,0xe8,0x87,0x98,0x00,0x01,0xff,0xe8,0xa0,0x9f, - 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xbb,0x8a,0x00,0x01,0xff,0xe6, - 0x9c,0x97,0x00,0x10,0x08,0x01,0xff,0xe6,0xb5,0xaa,0x00,0x01,0xff,0xe7,0x8b,0xbc, - 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x83,0x8e,0x00,0x01,0xff,0xe4,0xbe,0x86, - 0x00,0x10,0x08,0x01,0xff,0xe5,0x86,0xb7,0x00,0x01,0xff,0xe5,0x8b,0x9e,0x00,0xd3, - 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x93,0x84,0x00,0x01,0xff,0xe6, - 0xab,0x93,0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x90,0x00,0x01,0xff,0xe7,0x9b,0xa7, - 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x80,0x81,0x00,0x01,0xff,0xe8,0x98,0x86, - 0x00,0x10,0x08,0x01,0xff,0xe8,0x99,0x9c,0x00,0x01,0xff,0xe8,0xb7,0xaf,0x00,0xd2, - 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9c,0xb2,0x00,0x01,0xff,0xe9,0xad,0xaf, - 0x00,0x10,0x08,0x01,0xff,0xe9,0xb7,0xba,0x00,0x01,0xff,0xe7,0xa2,0x8c,0x00,0xd1, - 0x10,0x10,0x08,0x01,0xff,0xe7,0xa5,0xbf,0x00,0x01,0xff,0xe7,0xb6,0xa0,0x00,0x10, - 0x08,0x01,0xff,0xe8,0x8f,0x89,0x00,0x01,0xff,0xe9,0x8c,0x84,0x00,0xcf,0x86,0xe5, - 0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xb9, - 0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0x10,0x08,0x01,0xff,0xe5,0xa3,0x9f,0x00, - 0x01,0xff,0xe5,0xbc,0x84,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xb1,0xa0,0x00, - 
0x01,0xff,0xe8,0x81,0xbe,0x00,0x10,0x08,0x01,0xff,0xe7,0x89,0xa2,0x00,0x01,0xff, - 0xe7,0xa3,0x8a,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xb3,0x82,0x00, - 0x01,0xff,0xe9,0x9b,0xb7,0x00,0x10,0x08,0x01,0xff,0xe5,0xa3,0x98,0x00,0x01,0xff, - 0xe5,0xb1,0xa2,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8,0x93,0x00,0x01,0xff, - 0xe6,0xb7,0x9a,0x00,0x10,0x08,0x01,0xff,0xe6,0xbc,0x8f,0x00,0x01,0xff,0xe7,0xb4, - 0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xb8,0xb7,0x00, - 0x01,0xff,0xe9,0x99,0x8b,0x00,0x10,0x08,0x01,0xff,0xe5,0x8b,0x92,0x00,0x01,0xff, - 0xe8,0x82,0x8b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x87,0x9c,0x00,0x01,0xff, - 0xe5,0x87,0x8c,0x00,0x10,0x08,0x01,0xff,0xe7,0xa8,0x9c,0x00,0x01,0xff,0xe7,0xb6, - 0xbe,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x8f,0xb1,0x00,0x01,0xff, - 0xe9,0x99,0xb5,0x00,0x10,0x08,0x01,0xff,0xe8,0xae,0x80,0x00,0x01,0xff,0xe6,0x8b, - 0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8,0x82,0x00,0x01,0xff,0xe8,0xab, - 0xbe,0x00,0x10,0x08,0x01,0xff,0xe4,0xb8,0xb9,0x00,0x01,0xff,0xe5,0xaf,0xa7,0x00, - 0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x80,0x92,0x00, - 0x01,0xff,0xe7,0x8e,0x87,0x00,0x10,0x08,0x01,0xff,0xe7,0x95,0xb0,0x00,0x01,0xff, - 0xe5,0x8c,0x97,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xa3,0xbb,0x00,0x01,0xff, - 0xe4,0xbe,0xbf,0x00,0x10,0x08,0x01,0xff,0xe5,0xbe,0xa9,0x00,0x01,0xff,0xe4,0xb8, - 0x8d,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xb3,0x8c,0x00,0x01,0xff, - 0xe6,0x95,0xb8,0x00,0x10,0x08,0x01,0xff,0xe7,0xb4,0xa2,0x00,0x01,0xff,0xe5,0x8f, - 0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xa1,0x9e,0x00,0x01,0xff,0xe7,0x9c, - 0x81,0x00,0x10,0x08,0x01,0xff,0xe8,0x91,0x89,0x00,0x01,0xff,0xe8,0xaa,0xaa,0x00, - 0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xae,0xba,0x00,0x01,0xff, - 0xe8,0xbe,0xb0,0x00,0x10,0x08,0x01,0xff,0xe6,0xb2,0x88,0x00,0x01,0xff,0xe6,0x8b, - 0xbe,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x8b,0xa5,0x00,0x01,0xff,0xe6,0x8e, - 0xa0,0x00,0x10,0x08,0x01,0xff,0xe7,0x95,0xa5,0x00,0x01,0xff,0xe4,0xba,0xae,0x00, - 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x85,0xa9,0x00,0x01,0xff,0xe5,0x87, - 0x89,0x00,0x10,0x08,0x01,0xff,0xe6,0xa2,0x81,0x00,0x01,0xff,0xe7,0xb3,0xa7,0x00, - 0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x89,0xaf,0x00,0x01,0xff,0xe8,0xab,0x92,0x00, - 0x10,0x08,0x01,0xff,0xe9,0x87,0x8f,0x00,0x01,0xff,0xe5,0x8b,0xb5,0x00,0xe0,0x04, - 0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, - 0x01,0xff,0xe5,0x91,0x82,0x00,0x01,0xff,0xe5,0xa5,0xb3,0x00,0x10,0x08,0x01,0xff, - 0xe5,0xbb,0xac,0x00,0x01,0xff,0xe6,0x97,0x85,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, - 0xe6,0xbf,0xbe,0x00,0x01,0xff,0xe7,0xa4,0xaa,0x00,0x10,0x08,0x01,0xff,0xe9,0x96, - 0xad,0x00,0x01,0xff,0xe9,0xa9,0xaa,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0xe9,0xba,0x97,0x00,0x01,0xff,0xe9,0xbb,0x8e,0x00,0x10,0x08,0x01,0xff,0xe5,0x8a, - 0x9b,0x00,0x01,0xff,0xe6,0x9b,0x86,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xad, - 0xb7,0x00,0x01,0xff,0xe8,0xbd,0xa2,0x00,0x10,0x08,0x01,0xff,0xe5,0xb9,0xb4,0x00, - 0x01,0xff,0xe6,0x86,0x90,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0xe6,0x88,0x80,0x00,0x01,0xff,0xe6,0x92,0x9a,0x00,0x10,0x08,0x01,0xff,0xe6,0xbc, - 0xa3,0x00,0x01,0xff,0xe7,0x85,0x89,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0x92, - 0x89,0x00,0x01,0xff,0xe7,0xa7,0x8a,0x00,0x10,0x08,0x01,0xff,0xe7,0xb7,0xb4,0x00, - 0x01,0xff,0xe8,0x81,0xaf,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xbc, - 
0xa6,0x00,0x01,0xff,0xe8,0x93,0xae,0x00,0x10,0x08,0x01,0xff,0xe9,0x80,0xa3,0x00, - 0x01,0xff,0xe9,0x8d,0x8a,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x88,0x97,0x00, - 0x01,0xff,0xe5,0x8a,0xa3,0x00,0x10,0x08,0x01,0xff,0xe5,0x92,0xbd,0x00,0x01,0xff, - 0xe7,0x83,0x88,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0xe8,0xa3,0x82,0x00,0x01,0xff,0xe8,0xaa,0xaa,0x00,0x10,0x08,0x01,0xff,0xe5,0xbb, - 0x89,0x00,0x01,0xff,0xe5,0xbf,0xb5,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x8d, - 0xbb,0x00,0x01,0xff,0xe6,0xae,0xae,0x00,0x10,0x08,0x01,0xff,0xe7,0xb0,0xbe,0x00, - 0x01,0xff,0xe7,0x8d,0xb5,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe4,0xbb, - 0xa4,0x00,0x01,0xff,0xe5,0x9b,0xb9,0x00,0x10,0x08,0x01,0xff,0xe5,0xaf,0xa7,0x00, - 0x01,0xff,0xe5,0xb6,0xba,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x80,0x9c,0x00, - 0x01,0xff,0xe7,0x8e,0xb2,0x00,0x10,0x08,0x01,0xff,0xe7,0x91,0xa9,0x00,0x01,0xff, - 0xe7,0xbe,0x9a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x81, - 0x86,0x00,0x01,0xff,0xe9,0x88,0xb4,0x00,0x10,0x08,0x01,0xff,0xe9,0x9b,0xb6,0x00, - 0x01,0xff,0xe9,0x9d,0x88,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xa0,0x98,0x00, - 0x01,0xff,0xe4,0xbe,0x8b,0x00,0x10,0x08,0x01,0xff,0xe7,0xa6,0xae,0x00,0x01,0xff, - 0xe9,0x86,0xb4,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9a,0xb8,0x00, - 0x01,0xff,0xe6,0x83,0xa1,0x00,0x10,0x08,0x01,0xff,0xe4,0xba,0x86,0x00,0x01,0xff, - 0xe5,0x83,0x9a,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xaf,0xae,0x00,0x01,0xff, - 0xe5,0xb0,0xbf,0x00,0x10,0x08,0x01,0xff,0xe6,0x96,0x99,0x00,0x01,0xff,0xe6,0xa8, - 0x82,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10, - 0x08,0x01,0xff,0xe7,0x87,0x8e,0x00,0x01,0xff,0xe7,0x99,0x82,0x00,0x10,0x08,0x01, - 0xff,0xe8,0x93,0xbc,0x00,0x01,0xff,0xe9,0x81,0xbc,0x00,0xd1,0x10,0x10,0x08,0x01, - 0xff,0xe9,0xbe,0x8d,0x00,0x01,0xff,0xe6,0x9a,0x88,0x00,0x10,0x08,0x01,0xff,0xe9, - 0x98,0xae,0x00,0x01,0xff,0xe5,0x8a,0x89,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, - 0xff,0xe6,0x9d,0xbb,0x00,0x01,0xff,0xe6,0x9f,0xb3,0x00,0x10,0x08,0x01,0xff,0xe6, - 0xb5,0x81,0x00,0x01,0xff,0xe6,0xba,0x9c,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7, - 0x90,0x89,0x00,0x01,0xff,0xe7,0x95,0x99,0x00,0x10,0x08,0x01,0xff,0xe7,0xa1,0xab, - 0x00,0x01,0xff,0xe7,0xb4,0x90,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, - 0xff,0xe9,0xa1,0x9e,0x00,0x01,0xff,0xe5,0x85,0xad,0x00,0x10,0x08,0x01,0xff,0xe6, - 0x88,0xae,0x00,0x01,0xff,0xe9,0x99,0xb8,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5, - 0x80,0xab,0x00,0x01,0xff,0xe5,0xb4,0x99,0x00,0x10,0x08,0x01,0xff,0xe6,0xb7,0xaa, - 0x00,0x01,0xff,0xe8,0xbc,0xaa,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5, - 0xbe,0x8b,0x00,0x01,0xff,0xe6,0x85,0x84,0x00,0x10,0x08,0x01,0xff,0xe6,0xa0,0x97, - 0x00,0x01,0xff,0xe7,0x8e,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9a,0x86, - 0x00,0x01,0xff,0xe5,0x88,0xa9,0x00,0x10,0x08,0x01,0xff,0xe5,0x90,0x8f,0x00,0x01, - 0xff,0xe5,0xb1,0xa5,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, - 0xff,0xe6,0x98,0x93,0x00,0x01,0xff,0xe6,0x9d,0x8e,0x00,0x10,0x08,0x01,0xff,0xe6, - 0xa2,0xa8,0x00,0x01,0xff,0xe6,0xb3,0xa5,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7, - 0x90,0x86,0x00,0x01,0xff,0xe7,0x97,0xa2,0x00,0x10,0x08,0x01,0xff,0xe7,0xbd,0xb9, - 0x00,0x01,0xff,0xe8,0xa3,0x8f,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8, - 0xa3,0xa1,0x00,0x01,0xff,0xe9,0x87,0x8c,0x00,0x10,0x08,0x01,0xff,0xe9,0x9b,0xa2, - 0x00,0x01,0xff,0xe5,0x8c,0xbf,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xba,0xba, - 
0x00,0x01,0xff,0xe5,0x90,0x9d,0x00,0x10,0x08,0x01,0xff,0xe7,0x87,0x90,0x00,0x01, - 0xff,0xe7,0x92,0x98,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8, - 0x97,0xba,0x00,0x01,0xff,0xe9,0x9a,0xa3,0x00,0x10,0x08,0x01,0xff,0xe9,0xb1,0x97, - 0x00,0x01,0xff,0xe9,0xba,0x9f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x9e,0x97, - 0x00,0x01,0xff,0xe6,0xb7,0x8b,0x00,0x10,0x08,0x01,0xff,0xe8,0x87,0xa8,0x00,0x01, - 0xff,0xe7,0xab,0x8b,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xac,0xa0, - 0x00,0x01,0xff,0xe7,0xb2,0x92,0x00,0x10,0x08,0x01,0xff,0xe7,0x8b,0x80,0x00,0x01, - 0xff,0xe7,0x82,0x99,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xad,0x98,0x00,0x01, - 0xff,0xe4,0xbb,0x80,0x00,0x10,0x08,0x01,0xff,0xe8,0x8c,0xb6,0x00,0x01,0xff,0xe5, - 0x88,0xba,0x00,0xe2,0xad,0x06,0xe1,0xc4,0x03,0xe0,0xcb,0x01,0xcf,0x86,0xd5,0xe4, - 0xd4,0x74,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x88,0x87,0x00, - 0x01,0xff,0xe5,0xba,0xa6,0x00,0x10,0x08,0x01,0xff,0xe6,0x8b,0x93,0x00,0x01,0xff, - 0xe7,0xb3,0x96,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xae,0x85,0x00,0x01,0xff, - 0xe6,0xb4,0x9e,0x00,0x10,0x08,0x01,0xff,0xe6,0x9a,0xb4,0x00,0x01,0xff,0xe8,0xbc, - 0xbb,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xa1,0x8c,0x00,0x01,0xff, - 0xe9,0x99,0x8d,0x00,0x10,0x08,0x01,0xff,0xe8,0xa6,0x8b,0x00,0x01,0xff,0xe5,0xbb, - 0x93,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0xe5,0x85,0x80,0x00,0x01,0xff,0xe5,0x97, - 0x80,0x00,0x01,0x00,0xd3,0x34,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x01,0xff,0xe5,0xa1, - 0x9a,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe6,0x99,0xb4,0x00,0x01,0x00,0xd1,0x0c, - 0x10,0x04,0x01,0x00,0x01,0xff,0xe5,0x87,0x9e,0x00,0x10,0x08,0x01,0xff,0xe7,0x8c, - 0xaa,0x00,0x01,0xff,0xe7,0x9b,0x8a,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, - 0xe7,0xa4,0xbc,0x00,0x01,0xff,0xe7,0xa5,0x9e,0x00,0x10,0x08,0x01,0xff,0xe7,0xa5, - 0xa5,0x00,0x01,0xff,0xe7,0xa6,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9d, - 0x96,0x00,0x01,0xff,0xe7,0xb2,0xbe,0x00,0x10,0x08,0x01,0xff,0xe7,0xbe,0xbd,0x00, - 0x01,0x00,0xd4,0x64,0xd3,0x30,0xd2,0x18,0xd1,0x0c,0x10,0x08,0x01,0xff,0xe8,0x98, - 0x92,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe8,0xab,0xb8,0x00,0x01,0x00,0xd1,0x0c, - 0x10,0x04,0x01,0x00,0x01,0xff,0xe9,0x80,0xb8,0x00,0x10,0x08,0x01,0xff,0xe9,0x83, - 0xbd,0x00,0x01,0x00,0xd2,0x14,0x51,0x04,0x01,0x00,0x10,0x08,0x01,0xff,0xe9,0xa3, - 0xaf,0x00,0x01,0xff,0xe9,0xa3,0xbc,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xa4, - 0xa8,0x00,0x01,0xff,0xe9,0xb6,0xb4,0x00,0x10,0x08,0x0d,0xff,0xe9,0x83,0x9e,0x00, - 0x0d,0xff,0xe9,0x9a,0xb7,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff, - 0xe4,0xbe,0xae,0x00,0x06,0xff,0xe5,0x83,0xa7,0x00,0x10,0x08,0x06,0xff,0xe5,0x85, - 0x8d,0x00,0x06,0xff,0xe5,0x8b,0x89,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe5,0x8b, - 0xa4,0x00,0x06,0xff,0xe5,0x8d,0x91,0x00,0x10,0x08,0x06,0xff,0xe5,0x96,0x9d,0x00, - 0x06,0xff,0xe5,0x98,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe5,0x99, - 0xa8,0x00,0x06,0xff,0xe5,0xa1,0x80,0x00,0x10,0x08,0x06,0xff,0xe5,0xa2,0xa8,0x00, - 0x06,0xff,0xe5,0xb1,0xa4,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe5,0xb1,0xae,0x00, - 0x06,0xff,0xe6,0x82,0x94,0x00,0x10,0x08,0x06,0xff,0xe6,0x85,0xa8,0x00,0x06,0xff, - 0xe6,0x86,0x8e,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1, - 0x10,0x10,0x08,0x06,0xff,0xe6,0x87,0xb2,0x00,0x06,0xff,0xe6,0x95,0x8f,0x00,0x10, - 0x08,0x06,0xff,0xe6,0x97,0xa2,0x00,0x06,0xff,0xe6,0x9a,0x91,0x00,0xd1,0x10,0x10, - 0x08,0x06,0xff,0xe6,0xa2,0x85,0x00,0x06,0xff,0xe6,0xb5,0xb7,0x00,0x10,0x08,0x06, - 
0xff,0xe6,0xb8,0x9a,0x00,0x06,0xff,0xe6,0xbc,0xa2,0x00,0xd2,0x20,0xd1,0x10,0x10, - 0x08,0x06,0xff,0xe7,0x85,0xae,0x00,0x06,0xff,0xe7,0x88,0xab,0x00,0x10,0x08,0x06, - 0xff,0xe7,0x90,0xa2,0x00,0x06,0xff,0xe7,0xa2,0x91,0x00,0xd1,0x10,0x10,0x08,0x06, - 0xff,0xe7,0xa4,0xbe,0x00,0x06,0xff,0xe7,0xa5,0x89,0x00,0x10,0x08,0x06,0xff,0xe7, - 0xa5,0x88,0x00,0x06,0xff,0xe7,0xa5,0x90,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10, - 0x08,0x06,0xff,0xe7,0xa5,0x96,0x00,0x06,0xff,0xe7,0xa5,0x9d,0x00,0x10,0x08,0x06, - 0xff,0xe7,0xa6,0x8d,0x00,0x06,0xff,0xe7,0xa6,0x8e,0x00,0xd1,0x10,0x10,0x08,0x06, - 0xff,0xe7,0xa9,0x80,0x00,0x06,0xff,0xe7,0xaa,0x81,0x00,0x10,0x08,0x06,0xff,0xe7, - 0xaf,0x80,0x00,0x06,0xff,0xe7,0xb7,0xb4,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06, - 0xff,0xe7,0xb8,0x89,0x00,0x06,0xff,0xe7,0xb9,0x81,0x00,0x10,0x08,0x06,0xff,0xe7, - 0xbd,0xb2,0x00,0x06,0xff,0xe8,0x80,0x85,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe8, - 0x87,0xad,0x00,0x06,0xff,0xe8,0x89,0xb9,0x00,0x10,0x08,0x06,0xff,0xe8,0x89,0xb9, - 0x00,0x06,0xff,0xe8,0x91,0x97,0x00,0xd4,0x75,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10, - 0x08,0x06,0xff,0xe8,0xa4,0x90,0x00,0x06,0xff,0xe8,0xa6,0x96,0x00,0x10,0x08,0x06, - 0xff,0xe8,0xac,0x81,0x00,0x06,0xff,0xe8,0xac,0xb9,0x00,0xd1,0x10,0x10,0x08,0x06, - 0xff,0xe8,0xb3,0x93,0x00,0x06,0xff,0xe8,0xb4,0x88,0x00,0x10,0x08,0x06,0xff,0xe8, - 0xbe,0xb6,0x00,0x06,0xff,0xe9,0x80,0xb8,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06, - 0xff,0xe9,0x9b,0xa3,0x00,0x06,0xff,0xe9,0x9f,0xbf,0x00,0x10,0x08,0x06,0xff,0xe9, - 0xa0,0xbb,0x00,0x0b,0xff,0xe6,0x81,0xb5,0x00,0x91,0x11,0x10,0x09,0x0b,0xff,0xf0, - 0xa4,0x8b,0xae,0x00,0x0b,0xff,0xe8,0x88,0x98,0x00,0x00,0x00,0xd3,0x40,0xd2,0x20, - 0xd1,0x10,0x10,0x08,0x08,0xff,0xe4,0xb8,0xa6,0x00,0x08,0xff,0xe5,0x86,0xb5,0x00, - 0x10,0x08,0x08,0xff,0xe5,0x85,0xa8,0x00,0x08,0xff,0xe4,0xbe,0x80,0x00,0xd1,0x10, - 0x10,0x08,0x08,0xff,0xe5,0x85,0x85,0x00,0x08,0xff,0xe5,0x86,0x80,0x00,0x10,0x08, - 0x08,0xff,0xe5,0x8b,0x87,0x00,0x08,0xff,0xe5,0x8b,0xba,0x00,0xd2,0x20,0xd1,0x10, - 0x10,0x08,0x08,0xff,0xe5,0x96,0x9d,0x00,0x08,0xff,0xe5,0x95,0x95,0x00,0x10,0x08, - 0x08,0xff,0xe5,0x96,0x99,0x00,0x08,0xff,0xe5,0x97,0xa2,0x00,0xd1,0x10,0x10,0x08, - 0x08,0xff,0xe5,0xa1,0x9a,0x00,0x08,0xff,0xe5,0xa2,0xb3,0x00,0x10,0x08,0x08,0xff, - 0xe5,0xa5,0x84,0x00,0x08,0xff,0xe5,0xa5,0x94,0x00,0xe0,0x04,0x02,0xcf,0x86,0xe5, - 0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0xa9, - 0xa2,0x00,0x08,0xff,0xe5,0xac,0xa8,0x00,0x10,0x08,0x08,0xff,0xe5,0xbb,0x92,0x00, - 0x08,0xff,0xe5,0xbb,0x99,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0xbd,0xa9,0x00, - 0x08,0xff,0xe5,0xbe,0xad,0x00,0x10,0x08,0x08,0xff,0xe6,0x83,0x98,0x00,0x08,0xff, - 0xe6,0x85,0x8e,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x84,0x88,0x00, - 0x08,0xff,0xe6,0x86,0x8e,0x00,0x10,0x08,0x08,0xff,0xe6,0x85,0xa0,0x00,0x08,0xff, - 0xe6,0x87,0xb2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x88,0xb4,0x00,0x08,0xff, - 0xe6,0x8f,0x84,0x00,0x10,0x08,0x08,0xff,0xe6,0x90,0x9c,0x00,0x08,0xff,0xe6,0x91, - 0x92,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x95,0x96,0x00, - 0x08,0xff,0xe6,0x99,0xb4,0x00,0x10,0x08,0x08,0xff,0xe6,0x9c,0x97,0x00,0x08,0xff, - 0xe6,0x9c,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0x9d,0x96,0x00,0x08,0xff, - 0xe6,0xad,0xb9,0x00,0x10,0x08,0x08,0xff,0xe6,0xae,0xba,0x00,0x08,0xff,0xe6,0xb5, - 0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe6,0xbb,0x9b,0x00,0x08,0xff, - 0xe6,0xbb,0x8b,0x00,0x10,0x08,0x08,0xff,0xe6,0xbc,0xa2,0x00,0x08,0xff,0xe7,0x80, - 
0x9e,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x85,0xae,0x00,0x08,0xff,0xe7,0x9e, - 0xa7,0x00,0x10,0x08,0x08,0xff,0xe7,0x88,0xb5,0x00,0x08,0xff,0xe7,0x8a,0xaf,0x00, - 0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x8c,0xaa,0x00, - 0x08,0xff,0xe7,0x91,0xb1,0x00,0x10,0x08,0x08,0xff,0xe7,0x94,0x86,0x00,0x08,0xff, - 0xe7,0x94,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x98,0x9d,0x00,0x08,0xff, - 0xe7,0x98,0x9f,0x00,0x10,0x08,0x08,0xff,0xe7,0x9b,0x8a,0x00,0x08,0xff,0xe7,0x9b, - 0x9b,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x9b,0xb4,0x00,0x08,0xff, - 0xe7,0x9d,0x8a,0x00,0x10,0x08,0x08,0xff,0xe7,0x9d,0x80,0x00,0x08,0xff,0xe7,0xa3, - 0x8c,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0xaa,0xb1,0x00,0x08,0xff,0xe7,0xaf, - 0x80,0x00,0x10,0x08,0x08,0xff,0xe7,0xb1,0xbb,0x00,0x08,0xff,0xe7,0xb5,0x9b,0x00, - 0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0xb7,0xb4,0x00,0x08,0xff, - 0xe7,0xbc,0xbe,0x00,0x10,0x08,0x08,0xff,0xe8,0x80,0x85,0x00,0x08,0xff,0xe8,0x8d, - 0x92,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0x8f,0xaf,0x00,0x08,0xff,0xe8,0x9d, - 0xb9,0x00,0x10,0x08,0x08,0xff,0xe8,0xa5,0x81,0x00,0x08,0xff,0xe8,0xa6,0x86,0x00, - 0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xa6,0x96,0x00,0x08,0xff,0xe8,0xaa, - 0xbf,0x00,0x10,0x08,0x08,0xff,0xe8,0xab,0xb8,0x00,0x08,0xff,0xe8,0xab,0x8b,0x00, - 0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xac,0x81,0x00,0x08,0xff,0xe8,0xab,0xbe,0x00, - 0x10,0x08,0x08,0xff,0xe8,0xab,0xad,0x00,0x08,0xff,0xe8,0xac,0xb9,0x00,0xcf,0x86, - 0x95,0xde,0xd4,0x81,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xae, - 0x8a,0x00,0x08,0xff,0xe8,0xb4,0x88,0x00,0x10,0x08,0x08,0xff,0xe8,0xbc,0xb8,0x00, - 0x08,0xff,0xe9,0x81,0xb2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe9,0x86,0x99,0x00, - 0x08,0xff,0xe9,0x89,0xb6,0x00,0x10,0x08,0x08,0xff,0xe9,0x99,0xbc,0x00,0x08,0xff, - 0xe9,0x9b,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe9,0x9d,0x96,0x00, - 0x08,0xff,0xe9,0x9f,0x9b,0x00,0x10,0x08,0x08,0xff,0xe9,0x9f,0xbf,0x00,0x08,0xff, - 0xe9,0xa0,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe9,0xa0,0xbb,0x00,0x08,0xff, - 0xe9,0xac,0x92,0x00,0x10,0x08,0x08,0xff,0xe9,0xbe,0x9c,0x00,0x08,0xff,0xf0,0xa2, - 0xa1,0x8a,0x00,0xd3,0x45,0xd2,0x22,0xd1,0x12,0x10,0x09,0x08,0xff,0xf0,0xa2,0xa1, - 0x84,0x00,0x08,0xff,0xf0,0xa3,0x8f,0x95,0x00,0x10,0x08,0x08,0xff,0xe3,0xae,0x9d, - 0x00,0x08,0xff,0xe4,0x80,0x98,0x00,0xd1,0x11,0x10,0x08,0x08,0xff,0xe4,0x80,0xb9, - 0x00,0x08,0xff,0xf0,0xa5,0x89,0x89,0x00,0x10,0x09,0x08,0xff,0xf0,0xa5,0xb3,0x90, - 0x00,0x08,0xff,0xf0,0xa7,0xbb,0x93,0x00,0x92,0x14,0x91,0x10,0x10,0x08,0x08,0xff, - 0xe9,0xbd,0x83,0x00,0x08,0xff,0xe9,0xbe,0x8e,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0xe1,0x94,0x01,0xe0,0x08,0x01,0xcf,0x86,0xd5,0x42,0xd4,0x14,0x93,0x10,0x52,0x04, - 0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd3,0x10, - 0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x52,0x04, - 0x00,0x00,0xd1,0x0d,0x10,0x04,0x00,0x00,0x04,0xff,0xd7,0x99,0xd6,0xb4,0x00,0x10, - 0x04,0x01,0x1a,0x01,0xff,0xd7,0xb2,0xd6,0xb7,0x00,0xd4,0x42,0x53,0x04,0x01,0x00, - 0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff,0xd7,0xa9,0xd7,0x81,0x00,0x01, - 0xff,0xd7,0xa9,0xd7,0x82,0x00,0xd1,0x16,0x10,0x0b,0x01,0xff,0xd7,0xa9,0xd6,0xbc, - 0xd7,0x81,0x00,0x01,0xff,0xd7,0xa9,0xd6,0xbc,0xd7,0x82,0x00,0x10,0x09,0x01,0xff, - 0xd7,0x90,0xd6,0xb7,0x00,0x01,0xff,0xd7,0x90,0xd6,0xb8,0x00,0xd3,0x43,0xd2,0x24, - 0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x90,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x91,0xd6, - 
0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x92,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x93,0xd6, - 0xbc,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x94,0xd6,0xbc,0x00,0x01,0xff,0xd7, - 0x95,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x96,0xd6,0xbc,0x00,0x00,0x00,0xd2, - 0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x98,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x99, - 0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x9a,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x9b, - 0xd6,0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7,0x9c,0xd6,0xbc,0x00,0x00,0x00, - 0x10,0x09,0x01,0xff,0xd7,0x9e,0xd6,0xbc,0x00,0x00,0x00,0xcf,0x86,0x95,0x85,0x94, - 0x81,0xd3,0x3e,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0xa0,0xd6,0xbc,0x00, - 0x01,0xff,0xd7,0xa1,0xd6,0xbc,0x00,0x10,0x04,0x00,0x00,0x01,0xff,0xd7,0xa3,0xd6, - 0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7,0xa4,0xd6,0xbc,0x00,0x00,0x00,0x10, - 0x09,0x01,0xff,0xd7,0xa6,0xd6,0xbc,0x00,0x01,0xff,0xd7,0xa7,0xd6,0xbc,0x00,0xd2, - 0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0xa8,0xd6,0xbc,0x00,0x01,0xff,0xd7,0xa9, - 0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0xaa,0xd6,0xbc,0x00,0x01,0xff,0xd7,0x95, - 0xd6,0xb9,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x91,0xd6,0xbf,0x00,0x01,0xff, - 0xd7,0x9b,0xd6,0xbf,0x00,0x10,0x09,0x01,0xff,0xd7,0xa4,0xd6,0xbf,0x00,0x01,0x00, - 0x01,0x00,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00, - 0x93,0x0c,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0xcf,0x86, - 0x95,0x24,0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00, - 0x01,0x00,0x01,0x00,0x01,0x00,0xd3,0x5a,0xd2,0x06,0xcf,0x06,0x01,0x00,0xd1,0x14, - 0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x95,0x08,0x14,0x04,0x00,0x00,0x01,0x00, - 0x01,0x00,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x54,0x04,0x01,0x00,0x93,0x0c,0x92,0x08, - 0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x0c, - 0x94,0x08,0x13,0x04,0x01,0x00,0x00,0x00,0x05,0x00,0x54,0x04,0x05,0x00,0x53,0x04, - 0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04,0x06,0x00,0x07,0x00,0x00,0x00, - 0xd2,0xcc,0xd1,0xa4,0xd0,0x36,0xcf,0x86,0xd5,0x14,0x54,0x04,0x06,0x00,0x53,0x04, - 0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x94,0x1c,0xd3,0x10, - 0x52,0x04,0x01,0xe6,0x51,0x04,0x0a,0xe6,0x10,0x04,0x0a,0xe6,0x10,0xdc,0x52,0x04, - 0x10,0xdc,0x11,0x04,0x10,0xdc,0x11,0xe6,0x01,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24, - 0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x06,0x00,0x10,0x04, - 0x06,0x00,0x07,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x01,0x00,0x01,0x00, - 0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00, - 0x01,0x00,0x01,0x00,0xd4,0x18,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00, - 0x10,0x04,0x01,0x00,0x00,0x00,0x12,0x04,0x01,0x00,0x00,0x00,0x93,0x18,0xd2,0x0c, - 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x91,0x08,0x10,0x04,0x01,0x00, - 0x00,0x00,0x01,0x00,0x01,0x00,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x55,0x04, - 0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0xd1,0x08, - 0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x01,0x00,0xd1,0x50,0xd0,0x1e, - 0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00, - 0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x18, - 0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00, - 0x10,0x04,0x01,0x00,0x06,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04, - 
0x06,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x1e,0xcf,0x86, - 0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00, - 0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x18, - 0xd3,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x92,0x08,0x11,0x04, - 0x00,0x00,0x01,0x00,0x01,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00, - 0x01,0x00,0xd2,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00, - 0x00,0x00,0x00,0x00,0xd4,0x20,0xd3,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00, - 0x10,0x04,0x01,0x00,0x00,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04, - 0x01,0x00,0x00,0x00,0x53,0x04,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00, - 0x04,0x00,0x04,0x00,0x91,0x08,0x10,0x04,0x03,0x00,0x01,0x00,0x01,0x00,0x83,0xe2, - 0x30,0x3e,0xe1,0x1a,0x3b,0xe0,0x97,0x39,0xcf,0x86,0xe5,0x3b,0x26,0xc4,0xe3,0x16, - 0x14,0xe2,0xef,0x11,0xe1,0xd0,0x10,0xe0,0x60,0x07,0xcf,0x86,0xe5,0x53,0x03,0xe4, - 0x4c,0x02,0xe3,0x3d,0x01,0xd2,0x94,0xd1,0x70,0xd0,0x4a,0xcf,0x86,0xd5,0x18,0x94, - 0x14,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07, - 0x00,0x07,0x00,0x07,0x00,0xd4,0x14,0x93,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x07, - 0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x07,0x00,0x53,0x04,0x07,0x00,0xd2,0x0c,0x51, - 0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00, - 0x00,0x07,0x00,0xcf,0x86,0x95,0x20,0xd4,0x10,0x53,0x04,0x07,0x00,0x52,0x04,0x07, - 0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x11, - 0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x07,0x00,0xcf,0x86,0x55, - 0x04,0x07,0x00,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x92,0x0c,0x51,0x04,0x07, - 0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5, - 0x20,0x94,0x1c,0x93,0x18,0xd2,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00, - 0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54, - 0x04,0x07,0x00,0x93,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00, - 0x00,0x07,0x00,0x07,0x00,0xcf,0x06,0x08,0x00,0xd0,0x46,0xcf,0x86,0xd5,0x2c,0xd4, - 0x20,0x53,0x04,0x08,0x00,0xd2,0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x10, - 0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x53, - 0x04,0x0a,0x00,0x12,0x04,0x0a,0x00,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91, - 0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf, - 0x86,0xd5,0x08,0x14,0x04,0x00,0x00,0x0a,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a, - 0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x0a,0xdc,0x00,0x00,0xd2, - 0x5e,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x0a, - 0x00,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00, - 0x00,0x00,0x00,0x0a,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x0a,0x00,0x93,0x10,0x92, - 0x0c,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd4, - 0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0xdc,0x10,0x00,0x10,0x00,0x10, - 0x00,0x10,0x00,0x53,0x04,0x10,0x00,0x12,0x04,0x10,0x00,0x00,0x00,0xd1,0x70,0xd0, - 0x36,0xcf,0x86,0xd5,0x18,0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05, - 0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x10,0x00,0x94,0x18,0xd3,0x08,0x12, - 0x04,0x05,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x13, - 0x00,0x13,0x00,0x05,0x00,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x05,0x00,0x92, - 
0x0c,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x54, - 0x04,0x10,0x00,0xd3,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x10,0xe6,0x92, - 0x0c,0x51,0x04,0x10,0xe6,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf, - 0x86,0x95,0x18,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x51, - 0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0x08,0x00,0xcf,0x86,0x95,0x1c,0xd4, - 0x0c,0x93,0x08,0x12,0x04,0x08,0x00,0x00,0x00,0x08,0x00,0x93,0x0c,0x52,0x04,0x08, - 0x00,0x11,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0xba,0xd2,0x80,0xd1, - 0x34,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x05,0x00,0x94,0x10,0x93,0x0c,0x52,0x04,0x05, - 0x00,0x11,0x04,0x05,0x00,0x07,0x00,0x05,0x00,0x05,0x00,0xcf,0x86,0x95,0x14,0x94, - 0x10,0x53,0x04,0x05,0x00,0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00,0x07,0x00,0x07, - 0x00,0x07,0x00,0xd0,0x2a,0xcf,0x86,0xd5,0x14,0x54,0x04,0x07,0x00,0x53,0x04,0x07, - 0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x94,0x10,0x53,0x04,0x07, - 0x00,0x92,0x08,0x11,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0xcf,0x86,0xd5, - 0x10,0x54,0x04,0x12,0x00,0x93,0x08,0x12,0x04,0x12,0x00,0x00,0x00,0x12,0x00,0x54, - 0x04,0x12,0x00,0x53,0x04,0x12,0x00,0x12,0x04,0x12,0x00,0x00,0x00,0xd1,0x34,0xd0, - 0x12,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x08,0x13,0x04,0x10,0x00,0x00,0x00,0x10, - 0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x18,0xd3,0x08,0x12,0x04,0x10,0x00,0x00, - 0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x00, - 0x00,0xcf,0x06,0x00,0x00,0xd2,0x06,0xcf,0x06,0x10,0x00,0xd1,0x40,0xd0,0x1e,0xcf, - 0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x93,0x10,0x52,0x04,0x10,0x00,0x51, - 0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x14,0x54, - 0x04,0x10,0x00,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x00, - 0x00,0x94,0x08,0x13,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe4, - 0xce,0x02,0xe3,0x45,0x01,0xd2,0xd0,0xd1,0x70,0xd0,0x52,0xcf,0x86,0xd5,0x20,0x94, - 0x1c,0xd3,0x0c,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x92,0x0c,0x91, - 0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54,0x04,0x07, - 0x00,0xd3,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07, - 0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd1,0x08,0x10, - 0x04,0x07,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0xcf,0x86,0x95,0x18,0x54, - 0x04,0x0b,0x00,0x93,0x10,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x00, - 0x00,0x0b,0x00,0x0b,0x00,0x10,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10, - 0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10, - 0x00,0x00,0x00,0x94,0x14,0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10, - 0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54, - 0x04,0x11,0x00,0xd3,0x14,0xd2,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00, - 0x00,0x11,0x04,0x11,0x00,0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00, - 0x00,0x11,0x00,0x11,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x1c,0x54,0x04,0x09, - 0x00,0x53,0x04,0x09,0x00,0xd2,0x08,0x11,0x04,0x09,0x00,0x0b,0x00,0x51,0x04,0x00, - 0x00,0x10,0x04,0x00,0x00,0x09,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2, - 0x08,0x11,0x04,0x0a,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a, - 0x00,0xcf,0x06,0x00,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d, - 0x00,0x53,0x04,0x0d,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x11,0x00,0x0d,0x00,0xcf, - 
0x86,0x95,0x14,0x54,0x04,0x11,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x11, - 0x00,0x11,0x00,0x11,0x00,0x11,0x00,0xd2,0xec,0xd1,0xa4,0xd0,0x76,0xcf,0x86,0xd5, - 0x48,0xd4,0x28,0xd3,0x14,0x52,0x04,0x08,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x08, - 0x00,0x10,0x04,0x08,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x08, - 0x00,0x08,0xdc,0x10,0x04,0x08,0x00,0x08,0xe6,0xd3,0x10,0x52,0x04,0x08,0x00,0x91, - 0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00, - 0x00,0x08,0x00,0x08,0x00,0x08,0x00,0x54,0x04,0x08,0x00,0xd3,0x0c,0x52,0x04,0x08, - 0x00,0x11,0x04,0x14,0x00,0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x08,0xe6,0x08, - 0x01,0x10,0x04,0x08,0xdc,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x08, - 0x09,0xcf,0x86,0x95,0x28,0xd4,0x14,0x53,0x04,0x08,0x00,0x92,0x0c,0x91,0x08,0x10, - 0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0x92,0x0c,0x91, - 0x08,0x10,0x04,0x08,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0xd0,0x0a,0xcf, - 0x86,0x15,0x04,0x10,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x24,0xd3, - 0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0xe6,0x10,0x04,0x10, - 0xdc,0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10, - 0x00,0x93,0x10,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00, - 0x00,0x00,0x00,0xd1,0x54,0xd0,0x26,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b, - 0x00,0xd3,0x0c,0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x92,0x0c,0x91, - 0x08,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x14,0x54, - 0x04,0x0b,0x00,0x93,0x0c,0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x0b, - 0x00,0x54,0x04,0x0b,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b, - 0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x54,0x04,0x10, - 0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd2,0x0c,0x91, - 0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00, - 0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00, - 0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x96,0xd2, - 0x68,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x0b,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53, - 0x04,0x0b,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x11,0x00,0x54,0x04,0x11, - 0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x11,0x00,0x54,0x04,0x11,0x00,0xd3,0x10,0x92, - 0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x92,0x08,0x11, - 0x04,0x00,0x00,0x11,0x00,0x11,0x00,0xd1,0x28,0xd0,0x22,0xcf,0x86,0x55,0x04,0x14, - 0x00,0xd4,0x0c,0x93,0x08,0x12,0x04,0x14,0x00,0x14,0xe6,0x00,0x00,0x53,0x04,0x14, - 0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf, - 0x06,0x00,0x00,0xd2,0x2a,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55, - 0x04,0x00,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51, - 0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x58,0xd0, - 0x12,0xcf,0x86,0x55,0x04,0x14,0x00,0x94,0x08,0x13,0x04,0x14,0x00,0x00,0x00,0x14, - 0x00,0xcf,0x86,0x95,0x40,0xd4,0x24,0xd3,0x0c,0x52,0x04,0x14,0x00,0x11,0x04,0x14, - 0x00,0x14,0xdc,0xd2,0x0c,0x51,0x04,0x14,0xe6,0x10,0x04,0x14,0xe6,0x14,0xdc,0x91, - 0x08,0x10,0x04,0x14,0xe6,0x14,0xdc,0x14,0xdc,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10, - 
0x04,0x14,0xdc,0x14,0x00,0x14,0x00,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00, - 0x00,0x54,0x04,0x15,0x00,0x93,0x10,0x52,0x04,0x15,0x00,0x51,0x04,0x15,0x00,0x10, - 0x04,0x15,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xe5,0x0f,0x06,0xe4,0xf8,0x03,0xe3, - 0x02,0x02,0xd2,0xfb,0xd1,0x4c,0xd0,0x06,0xcf,0x06,0x0c,0x00,0xcf,0x86,0xd5,0x2c, - 0xd4,0x1c,0xd3,0x10,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x09, - 0x0c,0x00,0x52,0x04,0x0c,0x00,0x11,0x04,0x0c,0x00,0x00,0x00,0x93,0x0c,0x92,0x08, - 0x11,0x04,0x00,0x00,0x0c,0x00,0x0c,0x00,0x0c,0x00,0x54,0x04,0x0c,0x00,0x53,0x04, - 0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x09, - 0xd0,0x69,0xcf,0x86,0xd5,0x32,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0xd2,0x15, - 0x51,0x04,0x0b,0x00,0x10,0x0d,0x0b,0xff,0xf0,0x91,0x82,0x99,0xf0,0x91,0x82,0xba, - 0x00,0x0b,0x00,0x91,0x11,0x10,0x0d,0x0b,0xff,0xf0,0x91,0x82,0x9b,0xf0,0x91,0x82, - 0xba,0x00,0x0b,0x00,0x0b,0x00,0xd4,0x1d,0x53,0x04,0x0b,0x00,0x92,0x15,0x51,0x04, - 0x0b,0x00,0x10,0x04,0x0b,0x00,0x0b,0xff,0xf0,0x91,0x82,0xa5,0xf0,0x91,0x82,0xba, - 0x00,0x0b,0x00,0x53,0x04,0x0b,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0b, - 0x09,0x10,0x04,0x0b,0x07,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x20,0x94,0x1c,0xd3, - 0x0c,0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x91, - 0x08,0x10,0x04,0x00,0x00,0x14,0x00,0x00,0x00,0x0d,0x00,0xd4,0x14,0x53,0x04,0x0d, - 0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53, - 0x04,0x0d,0x00,0x92,0x08,0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0xd1,0x96,0xd0, - 0x5c,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x0d,0xe6,0x10, - 0x04,0x0d,0xe6,0x0d,0x00,0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd4,0x26,0x53,0x04,0x0d, - 0x00,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x0d,0x0d,0xff,0xf0,0x91,0x84, - 0xb1,0xf0,0x91,0x84,0xa7,0x00,0x0d,0xff,0xf0,0x91,0x84,0xb2,0xf0,0x91,0x84,0xa7, - 0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x00,0x0d,0x09,0x91, - 0x08,0x10,0x04,0x0d,0x09,0x00,0x00,0x0d,0x00,0x0d,0x00,0xcf,0x86,0xd5,0x18,0x94, - 0x14,0x93,0x10,0x52,0x04,0x0d,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00, - 0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x10, - 0x00,0x10,0x04,0x10,0x00,0x10,0x07,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00, - 0x00,0x00,0x00,0xd0,0x06,0xcf,0x06,0x0d,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x2c,0xd3, - 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x0d,0x09,0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd2, - 0x10,0xd1,0x08,0x10,0x04,0x0d,0x00,0x11,0x00,0x10,0x04,0x11,0x07,0x11,0x00,0x91, - 0x08,0x10,0x04,0x11,0x00,0x10,0x00,0x00,0x00,0x53,0x04,0x0d,0x00,0x92,0x0c,0x51, - 0x04,0x0d,0x00,0x10,0x04,0x10,0x00,0x11,0x00,0x11,0x00,0xd4,0x14,0x93,0x10,0x92, - 0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x93, - 0x10,0x52,0x04,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0xd2,0xc8,0xd1,0x48,0xd0,0x42,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x93, - 0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10, - 0x00,0x54,0x04,0x10,0x00,0xd3,0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10, - 0x00,0x10,0x09,0x10,0x04,0x10,0x07,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10, - 0x00,0x10,0x04,0x12,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd0,0x52,0xcf,0x86,0xd5, - 0x3c,0xd4,0x28,0xd3,0x10,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x11, - 
0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x11,0x00,0x51, - 0x04,0x11,0x00,0x10,0x04,0x00,0x00,0x11,0x00,0x53,0x04,0x11,0x00,0x52,0x04,0x11, - 0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x00,0x00,0x11,0x00,0x94,0x10,0x53,0x04,0x11, - 0x00,0x92,0x08,0x11,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55, - 0x04,0x10,0x00,0xd4,0x18,0x53,0x04,0x10,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x10, - 0x00,0x10,0x07,0x10,0x04,0x10,0x09,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92, - 0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xe1,0x27,0x01,0xd0,0x8a,0xcf,0x86, - 0xd5,0x44,0xd4,0x2c,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11,0x00,0x10,0x00, - 0x10,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x52,0x04,0x10,0x00, - 0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x93,0x14, - 0x92,0x10,0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00, - 0x10,0x00,0x10,0x00,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04, - 0x10,0x00,0x00,0x00,0x10,0x00,0x10,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04, - 0x10,0x00,0x00,0x00,0x10,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00, - 0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x00,0x00,0x14,0x07,0x91,0x08,0x10,0x04, - 0x10,0x07,0x10,0x00,0x10,0x00,0xcf,0x86,0xd5,0x6a,0xd4,0x42,0xd3,0x14,0x52,0x04, - 0x10,0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00, - 0xd2,0x19,0xd1,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0xff, - 0xf0,0x91,0x8d,0x87,0xf0,0x91,0x8c,0xbe,0x00,0x91,0x11,0x10,0x0d,0x10,0xff,0xf0, - 0x91,0x8d,0x87,0xf0,0x91,0x8d,0x97,0x00,0x10,0x09,0x00,0x00,0xd3,0x18,0xd2,0x0c, - 0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04, - 0x00,0x00,0x10,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00, - 0x10,0x00,0xd4,0x1c,0xd3,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10,0xe6, - 0x52,0x04,0x10,0xe6,0x91,0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x93,0x10, - 0x52,0x04,0x10,0xe6,0x91,0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x00,0x00, - 0xcf,0x06,0x00,0x00,0xe3,0x30,0x01,0xd2,0xb7,0xd1,0x48,0xd0,0x06,0xcf,0x06,0x12, - 0x00,0xcf,0x86,0x95,0x3c,0xd4,0x1c,0x93,0x18,0xd2,0x0c,0x51,0x04,0x12,0x00,0x10, - 0x04,0x12,0x09,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x07,0x12,0x00,0x12, - 0x00,0x53,0x04,0x12,0x00,0xd2,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x00,0x00,0x12, - 0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x12,0x00,0x10,0x04,0x14,0xe6,0x15,0x00,0x00, - 0x00,0xd0,0x45,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10, - 0x00,0xd2,0x15,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x10,0xff,0xf0,0x91,0x92, - 0xb9,0xf0,0x91,0x92,0xba,0x00,0xd1,0x11,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9, - 0xf0,0x91,0x92,0xb0,0x00,0x10,0x00,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0, - 0x91,0x92,0xbd,0x00,0x10,0x00,0xcf,0x86,0x95,0x24,0xd4,0x14,0x93,0x10,0x92,0x0c, - 0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x09,0x10,0x07,0x10,0x00,0x00,0x00,0x53,0x04, - 0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x06, - 0xcf,0x06,0x00,0x00,0xd0,0x40,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00, - 0xd3,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0xd2,0x1e,0x51,0x04, - 0x10,0x00,0x10,0x0d,0x10,0xff,0xf0,0x91,0x96,0xb8,0xf0,0x91,0x96,0xaf,0x00,0x10, - 0xff,0xf0,0x91,0x96,0xb9,0xf0,0x91,0x96,0xaf,0x00,0x51,0x04,0x10,0x00,0x10,0x04, - 0x10,0x00,0x10,0x09,0xcf,0x86,0x95,0x2c,0xd4,0x1c,0xd3,0x10,0x92,0x0c,0x91,0x08, - 
0x10,0x04,0x10,0x07,0x10,0x00,0x10,0x00,0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00, - 0x11,0x00,0x11,0x00,0x53,0x04,0x11,0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00, - 0x00,0x00,0x00,0x00,0xd2,0xa0,0xd1,0x5c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00, - 0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00, - 0x10,0x04,0x10,0x00,0x10,0x09,0xcf,0x86,0xd5,0x24,0xd4,0x14,0x93,0x10,0x52,0x04, - 0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04, - 0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x14,0x53,0x04, - 0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d,0x00,0xd3,0x10, - 0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x09,0x0d,0x07,0x92,0x0c, - 0x91,0x08,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x14, - 0x94,0x10,0x53,0x04,0x0d,0x00,0x92,0x08,0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x54,0x04,0x11,0x00, - 0x53,0x04,0x11,0x00,0xd2,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x14,0x00,0x00,0x00, - 0x91,0x08,0x10,0x04,0x00,0x00,0x11,0x00,0x11,0x00,0x94,0x14,0x53,0x04,0x11,0x00, - 0x92,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x11,0x09,0x00,0x00,0x11,0x00, - 0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xe4,0x59,0x01,0xd3,0xb2,0xd2,0x5c,0xd1, - 0x28,0xd0,0x22,0xcf,0x86,0x55,0x04,0x14,0x00,0x54,0x04,0x14,0x00,0x53,0x04,0x14, - 0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x14,0x00,0x14,0x09,0x10,0x04,0x14,0x07,0x14, - 0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd0,0x0a,0xcf,0x86,0x15,0x04,0x00,0x00,0x10, - 0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x10,0x92,0x0c,0x51, - 0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51, - 0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0, - 0x1a,0xcf,0x86,0x55,0x04,0x00,0x00,0x94,0x10,0x53,0x04,0x15,0x00,0x92,0x08,0x11, - 0x04,0x00,0x00,0x15,0x00,0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x15, - 0x00,0x53,0x04,0x15,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x15,0x00,0x15,0x00,0x94, - 0x1c,0x93,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x15,0x09,0x15,0x00,0x15,0x00,0x91, - 0x08,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd2,0xa0,0xd1, - 0x3c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x93,0x10,0x52, - 0x04,0x13,0x00,0x91,0x08,0x10,0x04,0x13,0x09,0x13,0x00,0x13,0x00,0x13,0x00,0xcf, - 0x86,0x95,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10, - 0x04,0x13,0x00,0x13,0x09,0x00,0x00,0x13,0x00,0x13,0x00,0xd0,0x46,0xcf,0x86,0xd5, - 0x2c,0xd4,0x10,0x93,0x0c,0x52,0x04,0x13,0x00,0x11,0x04,0x15,0x00,0x13,0x00,0x13, - 0x00,0x53,0x04,0x13,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x13,0x00,0x13,0x09,0x13, - 0x00,0x91,0x08,0x10,0x04,0x13,0x00,0x14,0x00,0x13,0x00,0x94,0x14,0x93,0x10,0x92, - 0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x92, - 0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00, - 0x00,0xe3,0xa9,0x01,0xd2,0xb0,0xd1,0x6c,0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14, - 0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x12,0x00, - 0x12,0x00,0x12,0x00,0x54,0x04,0x12,0x00,0xd3,0x10,0x52,0x04,0x12,0x00,0x51,0x04, - 0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00, - 
0x10,0x04,0x12,0x00,0x12,0x09,0xcf,0x86,0xd5,0x14,0x94,0x10,0x93,0x0c,0x52,0x04, - 0x12,0x00,0x11,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x94,0x14,0x53,0x04, - 0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00, - 0x12,0x00,0xd0,0x3e,0xcf,0x86,0xd5,0x14,0x54,0x04,0x12,0x00,0x93,0x0c,0x92,0x08, - 0x11,0x04,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0xd4,0x14,0x53,0x04,0x12,0x00, - 0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x93,0x10, - 0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00, - 0xcf,0x06,0x00,0x00,0xd1,0xa0,0xd0,0x52,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3,0x10, - 0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x92,0x0c, - 0x51,0x04,0x13,0x00,0x10,0x04,0x00,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x54,0x04, - 0x13,0x00,0xd3,0x10,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00, - 0x00,0x00,0xd2,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x51,0x04, - 0x13,0x00,0x10,0x04,0x00,0x00,0x13,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0x93,0x14, - 0xd2,0x0c,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x07,0x13,0x00,0x11,0x04,0x13,0x09, - 0x13,0x00,0x00,0x00,0x53,0x04,0x13,0x00,0x92,0x08,0x11,0x04,0x13,0x00,0x00,0x00, - 0x00,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04, - 0x00,0x00,0x14,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x14,0x00, - 0x14,0x00,0x14,0x00,0xd0,0x52,0xcf,0x86,0xd5,0x3c,0xd4,0x14,0x53,0x04,0x14,0x00, - 0x52,0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0xd3,0x18, - 0xd2,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x51,0x04,0x14,0x00, - 0x10,0x04,0x14,0x00,0x14,0x09,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0x94,0x10,0x53,0x04,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00, - 0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd2,0x2a,0xd1,0x06,0xcf,0x06, - 0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04, - 0x14,0x00,0x53,0x04,0x14,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00, - 0xcf,0x86,0x55,0x04,0x15,0x00,0x54,0x04,0x15,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04, - 0x15,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04, - 0x00,0x00,0x15,0x00,0xd0,0xca,0xcf,0x86,0xd5,0xc2,0xd4,0x54,0xd3,0x06,0xcf,0x06, - 0x09,0x00,0xd2,0x06,0xcf,0x06,0x09,0x00,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x09,0x00, - 0xcf,0x86,0x55,0x04,0x09,0x00,0x94,0x14,0x53,0x04,0x09,0x00,0x52,0x04,0x09,0x00, - 0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x10,0x00,0x10,0x00,0xd0,0x1e,0xcf,0x86, - 0x95,0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04, - 0x10,0x00,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x68, - 0xd2,0x46,0xd1,0x40,0xd0,0x06,0xcf,0x06,0x09,0x00,0xcf,0x86,0x55,0x04,0x09,0x00, - 0xd4,0x20,0xd3,0x10,0x92,0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x10,0x00, - 0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00, - 0x93,0x10,0x52,0x04,0x09,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0xcf,0x06,0x11,0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86, - 0x95,0x10,0x94,0x0c,0x93,0x08,0x12,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86, - 0xd5,0x4c,0xd4,0x06,0xcf,0x06,0x0b,0x00,0xd3,0x40,0xd2,0x3a,0xd1,0x34,0xd0,0x2e, - 
0xcf,0x86,0x55,0x04,0x0b,0x00,0xd4,0x14,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00, - 0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x53,0x04,0x15,0x00,0x92,0x0c, - 0x91,0x08,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00, - 0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00, - 0xd1,0x4c,0xd0,0x44,0xcf,0x86,0xd5,0x3c,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06, - 0xcf,0x06,0x11,0x00,0xd2,0x2a,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86, - 0x95,0x18,0x94,0x14,0x93,0x10,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04, - 0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06, - 0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xe0,0xd2,0x01,0xcf, - 0x86,0xd5,0x06,0xcf,0x06,0x00,0x00,0xe4,0x0b,0x01,0xd3,0x06,0xcf,0x06,0x0c,0x00, - 0xd2,0x84,0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x0c,0x00,0x54,0x04,0x0c,0x00, - 0x53,0x04,0x0c,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04, - 0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x94,0x14,0x53,0x04, - 0x10,0x00,0xd2,0x08,0x11,0x04,0x10,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x10,0x00, - 0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04,0x00,0x00, - 0x10,0x00,0xd4,0x10,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00, - 0x00,0x00,0x93,0x10,0x52,0x04,0x10,0x01,0x91,0x08,0x10,0x04,0x10,0x01,0x10,0x00, - 0x00,0x00,0x00,0x00,0xd1,0x6c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04, - 0x10,0x00,0x93,0x10,0x52,0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x10,0xe6, - 0x10,0x00,0x10,0x00,0xcf,0x86,0xd5,0x24,0xd4,0x10,0x93,0x0c,0x52,0x04,0x10,0x00, - 0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x51,0x04, - 0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c, - 0x51,0x04,0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x53,0x04, - 0x10,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00, - 0xd0,0x0e,0xcf,0x86,0x95,0x08,0x14,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06, - 0x00,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xd2,0x30,0xd1,0x0c,0xd0,0x06,0xcf,0x06, - 0x00,0x00,0xcf,0x06,0x14,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x14,0x00, - 0x53,0x04,0x14,0x00,0x92,0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00, - 0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x4c,0xd0,0x06,0xcf,0x06,0x0d,0x00, - 0xcf,0x86,0xd5,0x2c,0x94,0x28,0xd3,0x10,0x52,0x04,0x0d,0x00,0x91,0x08,0x10,0x04, - 0x0d,0x00,0x15,0x00,0x15,0x00,0xd2,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00, - 0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0d,0x00,0x54,0x04, - 0x0d,0x00,0x53,0x04,0x0d,0x00,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04, - 0x0d,0x00,0x15,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x15,0x00, - 0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x0d,0x00, - 0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08, - 0x10,0x04,0x12,0x00,0x13,0x00,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86, - 0xcf,0x06,0x12,0x00,0xe2,0xc5,0x01,0xd1,0x8e,0xd0,0x86,0xcf,0x86,0xd5,0x48,0xd4, - 0x06,0xcf,0x06,0x12,0x00,0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x06,0xcf,0x06,0x12, - 0x00,0xd1,0x06,0xcf,0x06,0x12,0x00,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55, - 0x04,0x12,0x00,0xd4,0x14,0x53,0x04,0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10, - 
0x04,0x12,0x00,0x14,0x00,0x14,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x14,0x00,0x15, - 0x00,0x15,0x00,0x00,0x00,0xd4,0x36,0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x2a,0xd1, - 0x06,0xcf,0x06,0x12,0x00,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55,0x04,0x12, - 0x00,0x54,0x04,0x12,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x12, + 0x10,0x04,0x0c,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0c,0x00, + 0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x93,0x18,0xd2,0x0c, + 0x91,0x08,0x10,0x04,0x00,0x00,0x0c,0x00,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04, + 0x0c,0x00,0x00,0x00,0x00,0x00,0x94,0x20,0xd3,0x10,0x52,0x04,0x0c,0x00,0x51,0x04, + 0x0c,0x00,0x10,0x04,0x0c,0x00,0x00,0x00,0x52,0x04,0x0c,0x00,0x51,0x04,0x0c,0x00, + 0x10,0x04,0x0c,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0x94,0x10, + 0x93,0x0c,0x52,0x04,0x11,0x00,0x11,0x04,0x10,0x00,0x15,0x00,0x00,0x00,0x11,0x00, + 0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,0x55,0x04,0x0b,0x00,0xd4,0x14,0x53,0x04, + 0x0b,0x00,0x52,0x04,0x0b,0x00,0x91,0x08,0x10,0x04,0x0b,0x00,0x0b,0x09,0x00,0x00, + 0x53,0x04,0x0b,0x00,0x92,0x08,0x11,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0xcf,0x06, + 0x02,0xff,0xff,0xcf,0x86,0xcf,0x06,0x02,0xff,0xff,0xd1,0x76,0xd0,0x09,0xcf,0x86, + 0xcf,0x06,0x02,0xff,0xff,0xcf,0x86,0x85,0xd4,0x07,0xcf,0x06,0x02,0xff,0xff,0xd3, + 0x07,0xcf,0x06,0x02,0xff,0xff,0xd2,0x07,0xcf,0x06,0x02,0xff,0xff,0xd1,0x07,0xcf, + 0x06,0x02,0xff,0xff,0xd0,0x18,0xcf,0x86,0x55,0x05,0x02,0xff,0xff,0x94,0x0d,0x93, + 0x09,0x12,0x05,0x02,0xff,0xff,0x00,0x00,0x00,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x24, + 0x94,0x20,0xd3,0x10,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00, + 0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00, + 0x0b,0x00,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0x12,0x04,0x0b,0x00,0x00,0x00, + 0xd0,0x08,0xcf,0x86,0xcf,0x06,0x01,0x00,0xcf,0x86,0xd5,0x06,0xcf,0x06,0x01,0x00, + 0xe4,0x9c,0x10,0xe3,0x16,0x08,0xd2,0x06,0xcf,0x06,0x01,0x00,0xe1,0x08,0x04,0xe0, + 0x04,0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10, + 0x08,0x01,0xff,0xe8,0xb1,0x88,0x00,0x01,0xff,0xe6,0x9b,0xb4,0x00,0x10,0x08,0x01, + 0xff,0xe8,0xbb,0x8a,0x00,0x01,0xff,0xe8,0xb3,0x88,0x00,0xd1,0x10,0x10,0x08,0x01, + 0xff,0xe6,0xbb,0x91,0x00,0x01,0xff,0xe4,0xb8,0xb2,0x00,0x10,0x08,0x01,0xff,0xe5, + 0x8f,0xa5,0x00,0x01,0xff,0xe9,0xbe,0x9c,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, + 0xff,0xe9,0xbe,0x9c,0x00,0x01,0xff,0xe5,0xa5,0x91,0x00,0x10,0x08,0x01,0xff,0xe9, + 0x87,0x91,0x00,0x01,0xff,0xe5,0x96,0x87,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5, + 0xa5,0x88,0x00,0x01,0xff,0xe6,0x87,0xb6,0x00,0x10,0x08,0x01,0xff,0xe7,0x99,0xa9, + 0x00,0x01,0xff,0xe7,0xbe,0x85,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, + 0xff,0xe8,0x98,0xbf,0x00,0x01,0xff,0xe8,0x9e,0xba,0x00,0x10,0x08,0x01,0xff,0xe8, + 0xa3,0xb8,0x00,0x01,0xff,0xe9,0x82,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6, + 0xa8,0x82,0x00,0x01,0xff,0xe6,0xb4,0x9b,0x00,0x10,0x08,0x01,0xff,0xe7,0x83,0x99, + 0x00,0x01,0xff,0xe7,0x8f,0x9e,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8, + 0x90,0xbd,0x00,0x01,0xff,0xe9,0x85,0xaa,0x00,0x10,0x08,0x01,0xff,0xe9,0xa7,0xb1, + 0x00,0x01,0xff,0xe4,0xba,0x82,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x8d,0xb5, + 0x00,0x01,0xff,0xe6,0xac,0x84,0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x9b,0x00,0x01, + 0xff,0xe8,0x98,0xad,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01, + 0xff,0xe9,0xb8,0x9e,0x00,0x01,0xff,0xe5,0xb5,0x90,0x00,0x10,0x08,0x01,0xff,0xe6, + 
0xbf,0xab,0x00,0x01,0xff,0xe8,0x97,0x8d,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8, + 0xa5,0xa4,0x00,0x01,0xff,0xe6,0x8b,0x89,0x00,0x10,0x08,0x01,0xff,0xe8,0x87,0x98, + 0x00,0x01,0xff,0xe8,0xa0,0x9f,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5, + 0xbb,0x8a,0x00,0x01,0xff,0xe6,0x9c,0x97,0x00,0x10,0x08,0x01,0xff,0xe6,0xb5,0xaa, + 0x00,0x01,0xff,0xe7,0x8b,0xbc,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x83,0x8e, + 0x00,0x01,0xff,0xe4,0xbe,0x86,0x00,0x10,0x08,0x01,0xff,0xe5,0x86,0xb7,0x00,0x01, + 0xff,0xe5,0x8b,0x9e,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6, + 0x93,0x84,0x00,0x01,0xff,0xe6,0xab,0x93,0x00,0x10,0x08,0x01,0xff,0xe7,0x88,0x90, + 0x00,0x01,0xff,0xe7,0x9b,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x80,0x81, + 0x00,0x01,0xff,0xe8,0x98,0x86,0x00,0x10,0x08,0x01,0xff,0xe8,0x99,0x9c,0x00,0x01, + 0xff,0xe8,0xb7,0xaf,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0x9c,0xb2, + 0x00,0x01,0xff,0xe9,0xad,0xaf,0x00,0x10,0x08,0x01,0xff,0xe9,0xb7,0xba,0x00,0x01, + 0xff,0xe7,0xa2,0x8c,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xa5,0xbf,0x00,0x01, + 0xff,0xe7,0xb6,0xa0,0x00,0x10,0x08,0x01,0xff,0xe8,0x8f,0x89,0x00,0x01,0xff,0xe9, + 0x8c,0x84,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xe9,0xb9,0xbf,0x00,0x01,0xff,0xe8,0xab,0x96,0x00,0x10,0x08, + 0x01,0xff,0xe5,0xa3,0x9f,0x00,0x01,0xff,0xe5,0xbc,0x84,0x00,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xe7,0xb1,0xa0,0x00,0x01,0xff,0xe8,0x81,0xbe,0x00,0x10,0x08,0x01,0xff, + 0xe7,0x89,0xa2,0x00,0x01,0xff,0xe7,0xa3,0x8a,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xe8,0xb3,0x82,0x00,0x01,0xff,0xe9,0x9b,0xb7,0x00,0x10,0x08,0x01,0xff, + 0xe5,0xa3,0x98,0x00,0x01,0xff,0xe5,0xb1,0xa2,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, + 0xe6,0xa8,0x93,0x00,0x01,0xff,0xe6,0xb7,0x9a,0x00,0x10,0x08,0x01,0xff,0xe6,0xbc, + 0x8f,0x00,0x01,0xff,0xe7,0xb4,0xaf,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xe7,0xb8,0xb7,0x00,0x01,0xff,0xe9,0x99,0x8b,0x00,0x10,0x08,0x01,0xff, + 0xe5,0x8b,0x92,0x00,0x01,0xff,0xe8,0x82,0x8b,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, + 0xe5,0x87,0x9c,0x00,0x01,0xff,0xe5,0x87,0x8c,0x00,0x10,0x08,0x01,0xff,0xe7,0xa8, + 0x9c,0x00,0x01,0xff,0xe7,0xb6,0xbe,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, + 0xe8,0x8f,0xb1,0x00,0x01,0xff,0xe9,0x99,0xb5,0x00,0x10,0x08,0x01,0xff,0xe8,0xae, + 0x80,0x00,0x01,0xff,0xe6,0x8b,0x8f,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xa8, + 0x82,0x00,0x01,0xff,0xe8,0xab,0xbe,0x00,0x10,0x08,0x01,0xff,0xe4,0xb8,0xb9,0x00, + 0x01,0xff,0xe5,0xaf,0xa7,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xe6,0x80,0x92,0x00,0x01,0xff,0xe7,0x8e,0x87,0x00,0x10,0x08,0x01,0xff, + 0xe7,0x95,0xb0,0x00,0x01,0xff,0xe5,0x8c,0x97,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, + 0xe7,0xa3,0xbb,0x00,0x01,0xff,0xe4,0xbe,0xbf,0x00,0x10,0x08,0x01,0xff,0xe5,0xbe, + 0xa9,0x00,0x01,0xff,0xe4,0xb8,0x8d,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, + 0xe6,0xb3,0x8c,0x00,0x01,0xff,0xe6,0x95,0xb8,0x00,0x10,0x08,0x01,0xff,0xe7,0xb4, + 0xa2,0x00,0x01,0xff,0xe5,0x8f,0x83,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0xa1, + 0x9e,0x00,0x01,0xff,0xe7,0x9c,0x81,0x00,0x10,0x08,0x01,0xff,0xe8,0x91,0x89,0x00, + 0x01,0xff,0xe8,0xaa,0xaa,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, + 0xe6,0xae,0xba,0x00,0x01,0xff,0xe8,0xbe,0xb0,0x00,0x10,0x08,0x01,0xff,0xe6,0xb2, + 0x88,0x00,0x01,0xff,0xe6,0x8b,0xbe,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x8b, + 0xa5,0x00,0x01,0xff,0xe6,0x8e,0xa0,0x00,0x10,0x08,0x01,0xff,0xe7,0x95,0xa5,0x00, + 
0x01,0xff,0xe4,0xba,0xae,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x85, + 0xa9,0x00,0x01,0xff,0xe5,0x87,0x89,0x00,0x10,0x08,0x01,0xff,0xe6,0xa2,0x81,0x00, + 0x01,0xff,0xe7,0xb3,0xa7,0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0x89,0xaf,0x00, + 0x01,0xff,0xe8,0xab,0x92,0x00,0x10,0x08,0x01,0xff,0xe9,0x87,0x8f,0x00,0x01,0xff, + 0xe5,0x8b,0xb5,0x00,0xe0,0x04,0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40, + 0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe5,0x91,0x82,0x00,0x01,0xff,0xe5,0xa5, + 0xb3,0x00,0x10,0x08,0x01,0xff,0xe5,0xbb,0xac,0x00,0x01,0xff,0xe6,0x97,0x85,0x00, + 0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0xbf,0xbe,0x00,0x01,0xff,0xe7,0xa4,0xaa,0x00, + 0x10,0x08,0x01,0xff,0xe9,0x96,0xad,0x00,0x01,0xff,0xe9,0xa9,0xaa,0x00,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xba,0x97,0x00,0x01,0xff,0xe9,0xbb,0x8e,0x00, + 0x10,0x08,0x01,0xff,0xe5,0x8a,0x9b,0x00,0x01,0xff,0xe6,0x9b,0x86,0x00,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xe6,0xad,0xb7,0x00,0x01,0xff,0xe8,0xbd,0xa2,0x00,0x10,0x08, + 0x01,0xff,0xe5,0xb9,0xb4,0x00,0x01,0xff,0xe6,0x86,0x90,0x00,0xd3,0x40,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x88,0x80,0x00,0x01,0xff,0xe6,0x92,0x9a,0x00, + 0x10,0x08,0x01,0xff,0xe6,0xbc,0xa3,0x00,0x01,0xff,0xe7,0x85,0x89,0x00,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xe7,0x92,0x89,0x00,0x01,0xff,0xe7,0xa7,0x8a,0x00,0x10,0x08, + 0x01,0xff,0xe7,0xb7,0xb4,0x00,0x01,0xff,0xe8,0x81,0xaf,0x00,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xe8,0xbc,0xa6,0x00,0x01,0xff,0xe8,0x93,0xae,0x00,0x10,0x08, + 0x01,0xff,0xe9,0x80,0xa3,0x00,0x01,0xff,0xe9,0x8d,0x8a,0x00,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xe5,0x88,0x97,0x00,0x01,0xff,0xe5,0x8a,0xa3,0x00,0x10,0x08,0x01,0xff, + 0xe5,0x92,0xbd,0x00,0x01,0xff,0xe7,0x83,0x88,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x01,0xff,0xe8,0xa3,0x82,0x00,0x01,0xff,0xe8,0xaa,0xaa,0x00, + 0x10,0x08,0x01,0xff,0xe5,0xbb,0x89,0x00,0x01,0xff,0xe5,0xbf,0xb5,0x00,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xe6,0x8d,0xbb,0x00,0x01,0xff,0xe6,0xae,0xae,0x00,0x10,0x08, + 0x01,0xff,0xe7,0xb0,0xbe,0x00,0x01,0xff,0xe7,0x8d,0xb5,0x00,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xe4,0xbb,0xa4,0x00,0x01,0xff,0xe5,0x9b,0xb9,0x00,0x10,0x08, + 0x01,0xff,0xe5,0xaf,0xa7,0x00,0x01,0xff,0xe5,0xb6,0xba,0x00,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xe6,0x80,0x9c,0x00,0x01,0xff,0xe7,0x8e,0xb2,0x00,0x10,0x08,0x01,0xff, + 0xe7,0x91,0xa9,0x00,0x01,0xff,0xe7,0xbe,0x9a,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xe8,0x81,0x86,0x00,0x01,0xff,0xe9,0x88,0xb4,0x00,0x10,0x08, + 0x01,0xff,0xe9,0x9b,0xb6,0x00,0x01,0xff,0xe9,0x9d,0x88,0x00,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xe9,0xa0,0x98,0x00,0x01,0xff,0xe4,0xbe,0x8b,0x00,0x10,0x08,0x01,0xff, + 0xe7,0xa6,0xae,0x00,0x01,0xff,0xe9,0x86,0xb4,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xe9,0x9a,0xb8,0x00,0x01,0xff,0xe6,0x83,0xa1,0x00,0x10,0x08,0x01,0xff, + 0xe4,0xba,0x86,0x00,0x01,0xff,0xe5,0x83,0x9a,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, + 0xe5,0xaf,0xae,0x00,0x01,0xff,0xe5,0xb0,0xbf,0x00,0x10,0x08,0x01,0xff,0xe6,0x96, + 0x99,0x00,0x01,0xff,0xe6,0xa8,0x82,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3, + 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0x87,0x8e,0x00,0x01,0xff,0xe7, + 0x99,0x82,0x00,0x10,0x08,0x01,0xff,0xe8,0x93,0xbc,0x00,0x01,0xff,0xe9,0x81,0xbc, + 0x00,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xbe,0x8d,0x00,0x01,0xff,0xe6,0x9a,0x88, + 0x00,0x10,0x08,0x01,0xff,0xe9,0x98,0xae,0x00,0x01,0xff,0xe5,0x8a,0x89,0x00,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x9d,0xbb,0x00,0x01,0xff,0xe6,0x9f,0xb3, + 
0x00,0x10,0x08,0x01,0xff,0xe6,0xb5,0x81,0x00,0x01,0xff,0xe6,0xba,0x9c,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0xe7,0x90,0x89,0x00,0x01,0xff,0xe7,0x95,0x99,0x00,0x10, + 0x08,0x01,0xff,0xe7,0xa1,0xab,0x00,0x01,0xff,0xe7,0xb4,0x90,0x00,0xd3,0x40,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe9,0xa1,0x9e,0x00,0x01,0xff,0xe5,0x85,0xad, + 0x00,0x10,0x08,0x01,0xff,0xe6,0x88,0xae,0x00,0x01,0xff,0xe9,0x99,0xb8,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0xe5,0x80,0xab,0x00,0x01,0xff,0xe5,0xb4,0x99,0x00,0x10, + 0x08,0x01,0xff,0xe6,0xb7,0xaa,0x00,0x01,0xff,0xe8,0xbc,0xaa,0x00,0xd2,0x20,0xd1, + 0x10,0x10,0x08,0x01,0xff,0xe5,0xbe,0x8b,0x00,0x01,0xff,0xe6,0x85,0x84,0x00,0x10, + 0x08,0x01,0xff,0xe6,0xa0,0x97,0x00,0x01,0xff,0xe7,0x8e,0x87,0x00,0xd1,0x10,0x10, + 0x08,0x01,0xff,0xe9,0x9a,0x86,0x00,0x01,0xff,0xe5,0x88,0xa9,0x00,0x10,0x08,0x01, + 0xff,0xe5,0x90,0x8f,0x00,0x01,0xff,0xe5,0xb1,0xa5,0x00,0xd4,0x80,0xd3,0x40,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x01,0xff,0xe6,0x98,0x93,0x00,0x01,0xff,0xe6,0x9d,0x8e, + 0x00,0x10,0x08,0x01,0xff,0xe6,0xa2,0xa8,0x00,0x01,0xff,0xe6,0xb3,0xa5,0x00,0xd1, + 0x10,0x10,0x08,0x01,0xff,0xe7,0x90,0x86,0x00,0x01,0xff,0xe7,0x97,0xa2,0x00,0x10, + 0x08,0x01,0xff,0xe7,0xbd,0xb9,0x00,0x01,0xff,0xe8,0xa3,0x8f,0x00,0xd2,0x20,0xd1, + 0x10,0x10,0x08,0x01,0xff,0xe8,0xa3,0xa1,0x00,0x01,0xff,0xe9,0x87,0x8c,0x00,0x10, + 0x08,0x01,0xff,0xe9,0x9b,0xa2,0x00,0x01,0xff,0xe5,0x8c,0xbf,0x00,0xd1,0x10,0x10, + 0x08,0x01,0xff,0xe6,0xba,0xba,0x00,0x01,0xff,0xe5,0x90,0x9d,0x00,0x10,0x08,0x01, + 0xff,0xe7,0x87,0x90,0x00,0x01,0xff,0xe7,0x92,0x98,0x00,0xd3,0x40,0xd2,0x20,0xd1, + 0x10,0x10,0x08,0x01,0xff,0xe8,0x97,0xba,0x00,0x01,0xff,0xe9,0x9a,0xa3,0x00,0x10, + 0x08,0x01,0xff,0xe9,0xb1,0x97,0x00,0x01,0xff,0xe9,0xba,0x9f,0x00,0xd1,0x10,0x10, + 0x08,0x01,0xff,0xe6,0x9e,0x97,0x00,0x01,0xff,0xe6,0xb7,0x8b,0x00,0x10,0x08,0x01, + 0xff,0xe8,0x87,0xa8,0x00,0x01,0xff,0xe7,0xab,0x8b,0x00,0xd2,0x20,0xd1,0x10,0x10, + 0x08,0x01,0xff,0xe7,0xac,0xa0,0x00,0x01,0xff,0xe7,0xb2,0x92,0x00,0x10,0x08,0x01, + 0xff,0xe7,0x8b,0x80,0x00,0x01,0xff,0xe7,0x82,0x99,0x00,0xd1,0x10,0x10,0x08,0x01, + 0xff,0xe8,0xad,0x98,0x00,0x01,0xff,0xe4,0xbb,0x80,0x00,0x10,0x08,0x01,0xff,0xe8, + 0x8c,0xb6,0x00,0x01,0xff,0xe5,0x88,0xba,0x00,0xe2,0xad,0x06,0xe1,0xc4,0x03,0xe0, + 0xcb,0x01,0xcf,0x86,0xd5,0xe4,0xd4,0x74,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x01,0xff,0xe5,0x88,0x87,0x00,0x01,0xff,0xe5,0xba,0xa6,0x00,0x10,0x08,0x01,0xff, + 0xe6,0x8b,0x93,0x00,0x01,0xff,0xe7,0xb3,0x96,0x00,0xd1,0x10,0x10,0x08,0x01,0xff, + 0xe5,0xae,0x85,0x00,0x01,0xff,0xe6,0xb4,0x9e,0x00,0x10,0x08,0x01,0xff,0xe6,0x9a, + 0xb4,0x00,0x01,0xff,0xe8,0xbc,0xbb,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x01,0xff, + 0xe8,0xa1,0x8c,0x00,0x01,0xff,0xe9,0x99,0x8d,0x00,0x10,0x08,0x01,0xff,0xe8,0xa6, + 0x8b,0x00,0x01,0xff,0xe5,0xbb,0x93,0x00,0x91,0x10,0x10,0x08,0x01,0xff,0xe5,0x85, + 0x80,0x00,0x01,0xff,0xe5,0x97,0x80,0x00,0x01,0x00,0xd3,0x34,0xd2,0x18,0xd1,0x0c, + 0x10,0x08,0x01,0xff,0xe5,0xa1,0x9a,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe6,0x99, + 0xb4,0x00,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe5,0x87,0x9e,0x00, + 0x10,0x08,0x01,0xff,0xe7,0x8c,0xaa,0x00,0x01,0xff,0xe7,0x9b,0x8a,0x00,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x01,0xff,0xe7,0xa4,0xbc,0x00,0x01,0xff,0xe7,0xa5,0x9e,0x00, + 0x10,0x08,0x01,0xff,0xe7,0xa5,0xa5,0x00,0x01,0xff,0xe7,0xa6,0x8f,0x00,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xe9,0x9d,0x96,0x00,0x01,0xff,0xe7,0xb2,0xbe,0x00,0x10,0x08, + 0x01,0xff,0xe7,0xbe,0xbd,0x00,0x01,0x00,0xd4,0x64,0xd3,0x30,0xd2,0x18,0xd1,0x0c, + 
0x10,0x08,0x01,0xff,0xe8,0x98,0x92,0x00,0x01,0x00,0x10,0x08,0x01,0xff,0xe8,0xab, + 0xb8,0x00,0x01,0x00,0xd1,0x0c,0x10,0x04,0x01,0x00,0x01,0xff,0xe9,0x80,0xb8,0x00, + 0x10,0x08,0x01,0xff,0xe9,0x83,0xbd,0x00,0x01,0x00,0xd2,0x14,0x51,0x04,0x01,0x00, + 0x10,0x08,0x01,0xff,0xe9,0xa3,0xaf,0x00,0x01,0xff,0xe9,0xa3,0xbc,0x00,0xd1,0x10, + 0x10,0x08,0x01,0xff,0xe9,0xa4,0xa8,0x00,0x01,0xff,0xe9,0xb6,0xb4,0x00,0x10,0x08, + 0x0d,0xff,0xe9,0x83,0x9e,0x00,0x0d,0xff,0xe9,0x9a,0xb7,0x00,0xd3,0x40,0xd2,0x20, + 0xd1,0x10,0x10,0x08,0x06,0xff,0xe4,0xbe,0xae,0x00,0x06,0xff,0xe5,0x83,0xa7,0x00, + 0x10,0x08,0x06,0xff,0xe5,0x85,0x8d,0x00,0x06,0xff,0xe5,0x8b,0x89,0x00,0xd1,0x10, + 0x10,0x08,0x06,0xff,0xe5,0x8b,0xa4,0x00,0x06,0xff,0xe5,0x8d,0x91,0x00,0x10,0x08, + 0x06,0xff,0xe5,0x96,0x9d,0x00,0x06,0xff,0xe5,0x98,0x86,0x00,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x06,0xff,0xe5,0x99,0xa8,0x00,0x06,0xff,0xe5,0xa1,0x80,0x00,0x10,0x08, + 0x06,0xff,0xe5,0xa2,0xa8,0x00,0x06,0xff,0xe5,0xb1,0xa4,0x00,0xd1,0x10,0x10,0x08, + 0x06,0xff,0xe5,0xb1,0xae,0x00,0x06,0xff,0xe6,0x82,0x94,0x00,0x10,0x08,0x06,0xff, + 0xe6,0x85,0xa8,0x00,0x06,0xff,0xe6,0x86,0x8e,0x00,0xcf,0x86,0xe5,0x01,0x01,0xd4, + 0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe6,0x87,0xb2,0x00,0x06, + 0xff,0xe6,0x95,0x8f,0x00,0x10,0x08,0x06,0xff,0xe6,0x97,0xa2,0x00,0x06,0xff,0xe6, + 0x9a,0x91,0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe6,0xa2,0x85,0x00,0x06,0xff,0xe6, + 0xb5,0xb7,0x00,0x10,0x08,0x06,0xff,0xe6,0xb8,0x9a,0x00,0x06,0xff,0xe6,0xbc,0xa2, + 0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0x85,0xae,0x00,0x06,0xff,0xe7, + 0x88,0xab,0x00,0x10,0x08,0x06,0xff,0xe7,0x90,0xa2,0x00,0x06,0xff,0xe7,0xa2,0x91, + 0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xa4,0xbe,0x00,0x06,0xff,0xe7,0xa5,0x89, + 0x00,0x10,0x08,0x06,0xff,0xe7,0xa5,0x88,0x00,0x06,0xff,0xe7,0xa5,0x90,0x00,0xd3, + 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xa5,0x96,0x00,0x06,0xff,0xe7, + 0xa5,0x9d,0x00,0x10,0x08,0x06,0xff,0xe7,0xa6,0x8d,0x00,0x06,0xff,0xe7,0xa6,0x8e, + 0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xa9,0x80,0x00,0x06,0xff,0xe7,0xaa,0x81, + 0x00,0x10,0x08,0x06,0xff,0xe7,0xaf,0x80,0x00,0x06,0xff,0xe7,0xb7,0xb4,0x00,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe7,0xb8,0x89,0x00,0x06,0xff,0xe7,0xb9,0x81, + 0x00,0x10,0x08,0x06,0xff,0xe7,0xbd,0xb2,0x00,0x06,0xff,0xe8,0x80,0x85,0x00,0xd1, + 0x10,0x10,0x08,0x06,0xff,0xe8,0x87,0xad,0x00,0x06,0xff,0xe8,0x89,0xb9,0x00,0x10, + 0x08,0x06,0xff,0xe8,0x89,0xb9,0x00,0x06,0xff,0xe8,0x91,0x97,0x00,0xd4,0x75,0xd3, + 0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe8,0xa4,0x90,0x00,0x06,0xff,0xe8, + 0xa6,0x96,0x00,0x10,0x08,0x06,0xff,0xe8,0xac,0x81,0x00,0x06,0xff,0xe8,0xac,0xb9, + 0x00,0xd1,0x10,0x10,0x08,0x06,0xff,0xe8,0xb3,0x93,0x00,0x06,0xff,0xe8,0xb4,0x88, + 0x00,0x10,0x08,0x06,0xff,0xe8,0xbe,0xb6,0x00,0x06,0xff,0xe9,0x80,0xb8,0x00,0xd2, + 0x20,0xd1,0x10,0x10,0x08,0x06,0xff,0xe9,0x9b,0xa3,0x00,0x06,0xff,0xe9,0x9f,0xbf, + 0x00,0x10,0x08,0x06,0xff,0xe9,0xa0,0xbb,0x00,0x0b,0xff,0xe6,0x81,0xb5,0x00,0x91, + 0x11,0x10,0x09,0x0b,0xff,0xf0,0xa4,0x8b,0xae,0x00,0x0b,0xff,0xe8,0x88,0x98,0x00, + 0x00,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe4,0xb8,0xa6,0x00, + 0x08,0xff,0xe5,0x86,0xb5,0x00,0x10,0x08,0x08,0xff,0xe5,0x85,0xa8,0x00,0x08,0xff, + 0xe4,0xbe,0x80,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0x85,0x85,0x00,0x08,0xff, + 0xe5,0x86,0x80,0x00,0x10,0x08,0x08,0xff,0xe5,0x8b,0x87,0x00,0x08,0xff,0xe5,0x8b, + 0xba,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0x96,0x9d,0x00,0x08,0xff, + 
0xe5,0x95,0x95,0x00,0x10,0x08,0x08,0xff,0xe5,0x96,0x99,0x00,0x08,0xff,0xe5,0x97, + 0xa2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe5,0xa1,0x9a,0x00,0x08,0xff,0xe5,0xa2, + 0xb3,0x00,0x10,0x08,0x08,0xff,0xe5,0xa5,0x84,0x00,0x08,0xff,0xe5,0xa5,0x94,0x00, + 0xe0,0x04,0x02,0xcf,0x86,0xe5,0x01,0x01,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x08,0xff,0xe5,0xa9,0xa2,0x00,0x08,0xff,0xe5,0xac,0xa8,0x00,0x10,0x08, + 0x08,0xff,0xe5,0xbb,0x92,0x00,0x08,0xff,0xe5,0xbb,0x99,0x00,0xd1,0x10,0x10,0x08, + 0x08,0xff,0xe5,0xbd,0xa9,0x00,0x08,0xff,0xe5,0xbe,0xad,0x00,0x10,0x08,0x08,0xff, + 0xe6,0x83,0x98,0x00,0x08,0xff,0xe6,0x85,0x8e,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x08,0xff,0xe6,0x84,0x88,0x00,0x08,0xff,0xe6,0x86,0x8e,0x00,0x10,0x08,0x08,0xff, + 0xe6,0x85,0xa0,0x00,0x08,0xff,0xe6,0x87,0xb2,0x00,0xd1,0x10,0x10,0x08,0x08,0xff, + 0xe6,0x88,0xb4,0x00,0x08,0xff,0xe6,0x8f,0x84,0x00,0x10,0x08,0x08,0xff,0xe6,0x90, + 0x9c,0x00,0x08,0xff,0xe6,0x91,0x92,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x08,0xff,0xe6,0x95,0x96,0x00,0x08,0xff,0xe6,0x99,0xb4,0x00,0x10,0x08,0x08,0xff, + 0xe6,0x9c,0x97,0x00,0x08,0xff,0xe6,0x9c,0x9b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff, + 0xe6,0x9d,0x96,0x00,0x08,0xff,0xe6,0xad,0xb9,0x00,0x10,0x08,0x08,0xff,0xe6,0xae, + 0xba,0x00,0x08,0xff,0xe6,0xb5,0x81,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff, + 0xe6,0xbb,0x9b,0x00,0x08,0xff,0xe6,0xbb,0x8b,0x00,0x10,0x08,0x08,0xff,0xe6,0xbc, + 0xa2,0x00,0x08,0xff,0xe7,0x80,0x9e,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0x85, + 0xae,0x00,0x08,0xff,0xe7,0x9e,0xa7,0x00,0x10,0x08,0x08,0xff,0xe7,0x88,0xb5,0x00, + 0x08,0xff,0xe7,0x8a,0xaf,0x00,0xd4,0x80,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x08,0xff,0xe7,0x8c,0xaa,0x00,0x08,0xff,0xe7,0x91,0xb1,0x00,0x10,0x08,0x08,0xff, + 0xe7,0x94,0x86,0x00,0x08,0xff,0xe7,0x94,0xbb,0x00,0xd1,0x10,0x10,0x08,0x08,0xff, + 0xe7,0x98,0x9d,0x00,0x08,0xff,0xe7,0x98,0x9f,0x00,0x10,0x08,0x08,0xff,0xe7,0x9b, + 0x8a,0x00,0x08,0xff,0xe7,0x9b,0x9b,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff, + 0xe7,0x9b,0xb4,0x00,0x08,0xff,0xe7,0x9d,0x8a,0x00,0x10,0x08,0x08,0xff,0xe7,0x9d, + 0x80,0x00,0x08,0xff,0xe7,0xa3,0x8c,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe7,0xaa, + 0xb1,0x00,0x08,0xff,0xe7,0xaf,0x80,0x00,0x10,0x08,0x08,0xff,0xe7,0xb1,0xbb,0x00, + 0x08,0xff,0xe7,0xb5,0x9b,0x00,0xd3,0x40,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff, + 0xe7,0xb7,0xb4,0x00,0x08,0xff,0xe7,0xbc,0xbe,0x00,0x10,0x08,0x08,0xff,0xe8,0x80, + 0x85,0x00,0x08,0xff,0xe8,0x8d,0x92,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0x8f, + 0xaf,0x00,0x08,0xff,0xe8,0x9d,0xb9,0x00,0x10,0x08,0x08,0xff,0xe8,0xa5,0x81,0x00, + 0x08,0xff,0xe8,0xa6,0x86,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xa6, + 0x96,0x00,0x08,0xff,0xe8,0xaa,0xbf,0x00,0x10,0x08,0x08,0xff,0xe8,0xab,0xb8,0x00, + 0x08,0xff,0xe8,0xab,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff,0xe8,0xac,0x81,0x00, + 0x08,0xff,0xe8,0xab,0xbe,0x00,0x10,0x08,0x08,0xff,0xe8,0xab,0xad,0x00,0x08,0xff, + 0xe8,0xac,0xb9,0x00,0xcf,0x86,0x95,0xde,0xd4,0x81,0xd3,0x40,0xd2,0x20,0xd1,0x10, + 0x10,0x08,0x08,0xff,0xe8,0xae,0x8a,0x00,0x08,0xff,0xe8,0xb4,0x88,0x00,0x10,0x08, + 0x08,0xff,0xe8,0xbc,0xb8,0x00,0x08,0xff,0xe9,0x81,0xb2,0x00,0xd1,0x10,0x10,0x08, + 0x08,0xff,0xe9,0x86,0x99,0x00,0x08,0xff,0xe9,0x89,0xb6,0x00,0x10,0x08,0x08,0xff, + 0xe9,0x99,0xbc,0x00,0x08,0xff,0xe9,0x9b,0xa3,0x00,0xd2,0x20,0xd1,0x10,0x10,0x08, + 0x08,0xff,0xe9,0x9d,0x96,0x00,0x08,0xff,0xe9,0x9f,0x9b,0x00,0x10,0x08,0x08,0xff, + 0xe9,0x9f,0xbf,0x00,0x08,0xff,0xe9,0xa0,0x8b,0x00,0xd1,0x10,0x10,0x08,0x08,0xff, + 
0xe9,0xa0,0xbb,0x00,0x08,0xff,0xe9,0xac,0x92,0x00,0x10,0x08,0x08,0xff,0xe9,0xbe, + 0x9c,0x00,0x08,0xff,0xf0,0xa2,0xa1,0x8a,0x00,0xd3,0x45,0xd2,0x22,0xd1,0x12,0x10, + 0x09,0x08,0xff,0xf0,0xa2,0xa1,0x84,0x00,0x08,0xff,0xf0,0xa3,0x8f,0x95,0x00,0x10, + 0x08,0x08,0xff,0xe3,0xae,0x9d,0x00,0x08,0xff,0xe4,0x80,0x98,0x00,0xd1,0x11,0x10, + 0x08,0x08,0xff,0xe4,0x80,0xb9,0x00,0x08,0xff,0xf0,0xa5,0x89,0x89,0x00,0x10,0x09, + 0x08,0xff,0xf0,0xa5,0xb3,0x90,0x00,0x08,0xff,0xf0,0xa7,0xbb,0x93,0x00,0x92,0x14, + 0x91,0x10,0x10,0x08,0x08,0xff,0xe9,0xbd,0x83,0x00,0x08,0xff,0xe9,0xbe,0x8e,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0xe1,0x94,0x01,0xe0,0x08,0x01,0xcf,0x86,0xd5,0x42, + 0xd4,0x14,0x93,0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00, + 0x00,0x00,0x00,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00, + 0x01,0x00,0x01,0x00,0x52,0x04,0x00,0x00,0xd1,0x0d,0x10,0x04,0x00,0x00,0x04,0xff, + 0xd7,0x99,0xd6,0xb4,0x00,0x10,0x04,0x01,0x1a,0x01,0xff,0xd7,0xb2,0xd6,0xb7,0x00, + 0xd4,0x42,0x53,0x04,0x01,0x00,0xd2,0x16,0x51,0x04,0x01,0x00,0x10,0x09,0x01,0xff, + 0xd7,0xa9,0xd7,0x81,0x00,0x01,0xff,0xd7,0xa9,0xd7,0x82,0x00,0xd1,0x16,0x10,0x0b, + 0x01,0xff,0xd7,0xa9,0xd6,0xbc,0xd7,0x81,0x00,0x01,0xff,0xd7,0xa9,0xd6,0xbc,0xd7, + 0x82,0x00,0x10,0x09,0x01,0xff,0xd7,0x90,0xd6,0xb7,0x00,0x01,0xff,0xd7,0x90,0xd6, + 0xb8,0x00,0xd3,0x43,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x90,0xd6,0xbc, + 0x00,0x01,0xff,0xd7,0x91,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x92,0xd6,0xbc, + 0x00,0x01,0xff,0xd7,0x93,0xd6,0xbc,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x94, + 0xd6,0xbc,0x00,0x01,0xff,0xd7,0x95,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x96, + 0xd6,0xbc,0x00,0x00,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0x98,0xd6, + 0xbc,0x00,0x01,0xff,0xd7,0x99,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0x9a,0xd6, + 0xbc,0x00,0x01,0xff,0xd7,0x9b,0xd6,0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7, + 0x9c,0xd6,0xbc,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xd7,0x9e,0xd6,0xbc,0x00,0x00, + 0x00,0xcf,0x86,0x95,0x85,0x94,0x81,0xd3,0x3e,0xd2,0x1f,0xd1,0x12,0x10,0x09,0x01, + 0xff,0xd7,0xa0,0xd6,0xbc,0x00,0x01,0xff,0xd7,0xa1,0xd6,0xbc,0x00,0x10,0x04,0x00, + 0x00,0x01,0xff,0xd7,0xa3,0xd6,0xbc,0x00,0xd1,0x0d,0x10,0x09,0x01,0xff,0xd7,0xa4, + 0xd6,0xbc,0x00,0x00,0x00,0x10,0x09,0x01,0xff,0xd7,0xa6,0xd6,0xbc,0x00,0x01,0xff, + 0xd7,0xa7,0xd6,0xbc,0x00,0xd2,0x24,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7,0xa8,0xd6, + 0xbc,0x00,0x01,0xff,0xd7,0xa9,0xd6,0xbc,0x00,0x10,0x09,0x01,0xff,0xd7,0xaa,0xd6, + 0xbc,0x00,0x01,0xff,0xd7,0x95,0xd6,0xb9,0x00,0xd1,0x12,0x10,0x09,0x01,0xff,0xd7, + 0x91,0xd6,0xbf,0x00,0x01,0xff,0xd7,0x9b,0xd6,0xbf,0x00,0x10,0x09,0x01,0xff,0xd7, + 0xa4,0xd6,0xbf,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04, + 0x01,0x00,0x54,0x04,0x01,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x01,0x00,0x0c,0x00, + 0x0c,0x00,0x0c,0x00,0xcf,0x86,0x95,0x24,0xd4,0x10,0x93,0x0c,0x92,0x08,0x11,0x04, + 0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x00,0x00, + 0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0xd3,0x5a,0xd2,0x06, + 0xcf,0x06,0x01,0x00,0xd1,0x14,0xd0,0x06,0xcf,0x06,0x01,0x00,0xcf,0x86,0x95,0x08, + 0x14,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x54,0x04, + 0x01,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00, + 0x01,0x00,0xcf,0x86,0xd5,0x0c,0x94,0x08,0x13,0x04,0x01,0x00,0x00,0x00,0x05,0x00, + 0x54,0x04,0x05,0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x91,0x08,0x10,0x04, + 
0x06,0x00,0x07,0x00,0x00,0x00,0xd2,0xce,0xd1,0xa5,0xd0,0x37,0xcf,0x86,0xd5,0x15, + 0x54,0x05,0x06,0xff,0x00,0x53,0x04,0x08,0x00,0x92,0x08,0x11,0x04,0x08,0x00,0x00, + 0x00,0x00,0x00,0x94,0x1c,0xd3,0x10,0x52,0x04,0x01,0xe6,0x51,0x04,0x0a,0xe6,0x10, + 0x04,0x0a,0xe6,0x10,0xdc,0x52,0x04,0x10,0xdc,0x11,0x04,0x10,0xdc,0x11,0xe6,0x01, + 0x00,0xcf,0x86,0xd5,0x38,0xd4,0x24,0xd3,0x14,0x52,0x04,0x01,0x00,0xd1,0x08,0x10, + 0x04,0x01,0x00,0x06,0x00,0x10,0x04,0x06,0x00,0x07,0x00,0x92,0x0c,0x91,0x08,0x10, + 0x04,0x07,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x01, + 0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd4,0x18,0xd3,0x10,0x52, + 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x12,0x04,0x01, + 0x00,0x00,0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06, + 0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x01,0x00,0x01,0x00,0xd0,0x06,0xcf, + 0x06,0x01,0x00,0xcf,0x86,0x55,0x04,0x01,0x00,0x54,0x04,0x01,0x00,0x53,0x04,0x01, + 0x00,0x52,0x04,0x01,0x00,0xd1,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x10,0x04,0x00, + 0x00,0x01,0xff,0x00,0xd1,0x50,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93,0x10, + 0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01,0x00, + 0x01,0x00,0x01,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x01,0x00,0x53,0x04,0x01,0x00, + 0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x06,0x00,0x94,0x14, + 0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x06,0x00,0x01,0x00,0x01,0x00,0x01,0x00, + 0x01,0x00,0x01,0x00,0xd0,0x2f,0xcf,0x86,0x55,0x04,0x01,0x00,0xd4,0x15,0x93,0x11, + 0x92,0x0d,0x91,0x09,0x10,0x05,0x01,0xff,0x00,0x01,0x00,0x01,0x00,0x01,0x00,0x01, + 0x00,0x53,0x04,0x01,0x00,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01, + 0x00,0x00,0x00,0xcf,0x86,0xd5,0x38,0xd4,0x18,0xd3,0x0c,0x92,0x08,0x11,0x04,0x00, + 0x00,0x01,0x00,0x01,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd3, + 0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x01,0x00,0x01,0x00,0xd2,0x08,0x11,0x04,0x00, + 0x00,0x01,0x00,0x91,0x08,0x10,0x04,0x01,0x00,0x00,0x00,0x00,0x00,0xd4,0x20,0xd3, + 0x10,0x52,0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x52, + 0x04,0x01,0x00,0x51,0x04,0x01,0x00,0x10,0x04,0x01,0x00,0x00,0x00,0x53,0x05,0x00, + 0xff,0x00,0xd2,0x0d,0x91,0x09,0x10,0x05,0x00,0xff,0x00,0x04,0x00,0x04,0x00,0x91, + 0x08,0x10,0x04,0x03,0x00,0x01,0x00,0x01,0x00,0x83,0xe2,0x46,0x3e,0xe1,0x1f,0x3b, + 0xe0,0x9c,0x39,0xcf,0x86,0xe5,0x40,0x26,0xc4,0xe3,0x16,0x14,0xe2,0xef,0x11,0xe1, + 0xd0,0x10,0xe0,0x60,0x07,0xcf,0x86,0xe5,0x53,0x03,0xe4,0x4c,0x02,0xe3,0x3d,0x01, + 0xd2,0x94,0xd1,0x70,0xd0,0x4a,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x07,0x00, + 0x52,0x04,0x07,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00, + 0xd4,0x14,0x93,0x10,0x52,0x04,0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00, + 0x00,0x00,0x07,0x00,0x53,0x04,0x07,0x00,0xd2,0x0c,0x51,0x04,0x07,0x00,0x10,0x04, + 0x07,0x00,0x00,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0xcf,0x86, + 0x95,0x20,0xd4,0x10,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00, + 0x00,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00, + 0x00,0x00,0xd0,0x06,0xcf,0x06,0x07,0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x54,0x04, + 0x07,0x00,0x53,0x04,0x07,0x00,0x92,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00, + 0x00,0x00,0x00,0x00,0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x94,0x1c,0x93,0x18, + 0xd2,0x0c,0x51,0x04,0x07,0x00,0x10,0x04,0x07,0x00,0x00,0x00,0x51,0x04,0x00,0x00, + 
0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54,0x04,0x07,0x00,0x93,0x10, + 0x52,0x04,0x07,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0x07,0x00, + 0xcf,0x06,0x08,0x00,0xd0,0x46,0xcf,0x86,0xd5,0x2c,0xd4,0x20,0x53,0x04,0x08,0x00, + 0xd2,0x0c,0x51,0x04,0x08,0x00,0x10,0x04,0x08,0x00,0x10,0x00,0xd1,0x08,0x10,0x04, + 0x10,0x00,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x53,0x04,0x0a,0x00,0x12,0x04, + 0x0a,0x00,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04, + 0x00,0x00,0x0a,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,0x52,0x04,0x0a,0x00, + 0x91,0x08,0x10,0x04,0x0a,0x00,0x0a,0xdc,0x00,0x00,0xd2,0x5e,0xd1,0x06,0xcf,0x06, + 0x00,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00, + 0x52,0x04,0x0a,0x00,0x91,0x08,0x10,0x04,0x0a,0x00,0x00,0x00,0x00,0x00,0x0a,0x00, + 0xcf,0x86,0xd5,0x18,0x54,0x04,0x0a,0x00,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04, + 0x0a,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c, + 0x91,0x08,0x10,0x04,0x10,0xdc,0x10,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x53,0x04, + 0x10,0x00,0x12,0x04,0x10,0x00,0x00,0x00,0xd1,0x70,0xd0,0x36,0xcf,0x86,0xd5,0x18, + 0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05,0x00,0x51,0x04,0x05,0x00, + 0x10,0x04,0x05,0x00,0x10,0x00,0x94,0x18,0xd3,0x08,0x12,0x04,0x05,0x00,0x00,0x00, + 0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x13,0x00,0x13,0x00,0x05,0x00, + 0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x05,0x00,0x92,0x0c,0x51,0x04,0x05,0x00, + 0x10,0x04,0x05,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x0c, + 0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x10,0xe6,0x92,0x0c,0x51,0x04,0x10,0xe6, + 0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04, + 0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04, + 0x00,0x00,0x07,0x00,0x08,0x00,0xcf,0x86,0x95,0x1c,0xd4,0x0c,0x93,0x08,0x12,0x04, + 0x08,0x00,0x00,0x00,0x08,0x00,0x93,0x0c,0x52,0x04,0x08,0x00,0x11,0x04,0x08,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0xd3,0xba,0xd2,0x80,0xd1,0x34,0xd0,0x1a,0xcf,0x86, + 0x55,0x04,0x05,0x00,0x94,0x10,0x93,0x0c,0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00, + 0x07,0x00,0x05,0x00,0x05,0x00,0xcf,0x86,0x95,0x14,0x94,0x10,0x53,0x04,0x05,0x00, + 0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0xd0,0x2a, + 0xcf,0x86,0xd5,0x14,0x54,0x04,0x07,0x00,0x53,0x04,0x07,0x00,0x52,0x04,0x07,0x00, + 0x11,0x04,0x07,0x00,0x00,0x00,0x94,0x10,0x53,0x04,0x07,0x00,0x92,0x08,0x11,0x04, + 0x07,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0xcf,0x86,0xd5,0x10,0x54,0x04,0x12,0x00, + 0x93,0x08,0x12,0x04,0x12,0x00,0x00,0x00,0x12,0x00,0x54,0x04,0x12,0x00,0x53,0x04, + 0x12,0x00,0x12,0x04,0x12,0x00,0x00,0x00,0xd1,0x34,0xd0,0x12,0xcf,0x86,0x55,0x04, + 0x10,0x00,0x94,0x08,0x13,0x04,0x10,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04, + 0x10,0x00,0x94,0x18,0xd3,0x08,0x12,0x04,0x10,0x00,0x00,0x00,0x52,0x04,0x00,0x00, + 0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x00,0x00,0xcf,0x06,0x00,0x00, + 0xd2,0x06,0xcf,0x06,0x10,0x00,0xd1,0x40,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00, + 0x54,0x04,0x10,0x00,0x93,0x10,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04, + 0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x10,0x00,0x93,0x0c, + 0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x08,0x13,0x04, + 0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe4,0xce,0x02,0xe3,0x45,0x01, + 
0xd2,0xd0,0xd1,0x70,0xd0,0x52,0xcf,0x86,0xd5,0x20,0x94,0x1c,0xd3,0x0c,0x52,0x04, + 0x07,0x00,0x11,0x04,0x07,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x07,0x00, + 0x00,0x00,0x07,0x00,0x07,0x00,0x07,0x00,0x54,0x04,0x07,0x00,0xd3,0x10,0x52,0x04, + 0x07,0x00,0x51,0x04,0x07,0x00,0x10,0x04,0x00,0x00,0x07,0x00,0xd2,0x0c,0x91,0x08, + 0x10,0x04,0x07,0x00,0x00,0x00,0x00,0x00,0xd1,0x08,0x10,0x04,0x07,0x00,0x00,0x00, + 0x10,0x04,0x00,0x00,0x07,0x00,0xcf,0x86,0x95,0x18,0x54,0x04,0x0b,0x00,0x93,0x10, + 0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04,0x00,0x00,0x0b,0x00,0x0b,0x00, + 0x10,0x00,0xd0,0x32,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00, + 0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x94,0x14, + 0x93,0x10,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00, + 0x10,0x00,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x11,0x00,0xd3,0x14, + 0xd2,0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x11,0x04,0x11,0x00, + 0x00,0x00,0x92,0x0c,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x11,0x00,0x11,0x00, + 0xd1,0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x1c,0x54,0x04,0x09,0x00,0x53,0x04,0x09,0x00, + 0xd2,0x08,0x11,0x04,0x09,0x00,0x0b,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00, + 0x09,0x00,0x54,0x04,0x0a,0x00,0x53,0x04,0x0a,0x00,0xd2,0x08,0x11,0x04,0x0a,0x00, + 0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0a,0x00,0xcf,0x06,0x00,0x00, + 0xd0,0x1a,0xcf,0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d,0x00,0x53,0x04,0x0d,0x00, + 0x52,0x04,0x00,0x00,0x11,0x04,0x11,0x00,0x0d,0x00,0xcf,0x86,0x95,0x14,0x54,0x04, + 0x11,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x11,0x00,0x11,0x00,0x11,0x00, + 0x11,0x00,0xd2,0xec,0xd1,0xa4,0xd0,0x76,0xcf,0x86,0xd5,0x48,0xd4,0x28,0xd3,0x14, + 0x52,0x04,0x08,0x00,0xd1,0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x10,0x04,0x08,0x00, + 0x00,0x00,0x52,0x04,0x00,0x00,0xd1,0x08,0x10,0x04,0x08,0x00,0x08,0xdc,0x10,0x04, + 0x08,0x00,0x08,0xe6,0xd3,0x10,0x52,0x04,0x08,0x00,0x91,0x08,0x10,0x04,0x00,0x00, + 0x08,0x00,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x08,0x00,0x08,0x00, + 0x08,0x00,0x54,0x04,0x08,0x00,0xd3,0x0c,0x52,0x04,0x08,0x00,0x11,0x04,0x14,0x00, + 0x00,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04,0x08,0xe6,0x08,0x01,0x10,0x04,0x08,0xdc, + 0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x08,0x09,0xcf,0x86,0x95,0x28, + 0xd4,0x14,0x53,0x04,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x53,0x04,0x08,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x08,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x0b,0x00,0xd0,0x0a,0xcf,0x86,0x15,0x04,0x10,0x00, + 0x00,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x24,0xd3,0x14,0x52,0x04,0x10,0x00, + 0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0xe6,0x10,0x04,0x10,0xdc,0x00,0x00,0x92,0x0c, + 0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x52,0x04, + 0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd1,0x54, + 0xd0,0x26,0xcf,0x86,0x55,0x04,0x0b,0x00,0x54,0x04,0x0b,0x00,0xd3,0x0c,0x52,0x04, + 0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00, + 0x0b,0x00,0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x0b,0x00,0x93,0x0c, + 0x52,0x04,0x0b,0x00,0x11,0x04,0x0b,0x00,0x00,0x00,0x0b,0x00,0x54,0x04,0x0b,0x00, + 0x93,0x10,0x92,0x0c,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00, + 0x0b,0x00,0xd0,0x42,0xcf,0x86,0xd5,0x28,0x54,0x04,0x10,0x00,0xd3,0x0c,0x92,0x08, + 0x11,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd2,0x0c,0x91,0x08,0x10,0x04,0x00,0x00, + 
0x10,0x00,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x14, + 0x53,0x04,0x00,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00, + 0x10,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x96,0xd2,0x68,0xd1,0x24,0xd0,0x06, + 0xcf,0x06,0x0b,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x0b,0x00,0x92,0x0c, + 0x91,0x08,0x10,0x04,0x0b,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0xd0,0x1e,0xcf,0x86,0x55,0x04,0x11,0x00,0x54,0x04,0x11,0x00,0x93,0x10,0x92,0x0c, + 0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86, + 0x55,0x04,0x11,0x00,0x54,0x04,0x11,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x11,0x00, + 0x10,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x11,0x00, + 0x11,0x00,0xd1,0x28,0xd0,0x22,0xcf,0x86,0x55,0x04,0x14,0x00,0xd4,0x0c,0x93,0x08, + 0x12,0x04,0x14,0x00,0x14,0xe6,0x00,0x00,0x53,0x04,0x14,0x00,0x92,0x08,0x11,0x04, + 0x14,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xd2,0x2a, + 0xd1,0x24,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04, + 0x0b,0x00,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10,0x04, + 0x0b,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x58,0xd0,0x12,0xcf,0x86,0x55,0x04, + 0x14,0x00,0x94,0x08,0x13,0x04,0x14,0x00,0x00,0x00,0x14,0x00,0xcf,0x86,0x95,0x40, + 0xd4,0x24,0xd3,0x0c,0x52,0x04,0x14,0x00,0x11,0x04,0x14,0x00,0x14,0xdc,0xd2,0x0c, + 0x51,0x04,0x14,0xe6,0x10,0x04,0x14,0xe6,0x14,0xdc,0x91,0x08,0x10,0x04,0x14,0xe6, + 0x14,0xdc,0x14,0xdc,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0xdc,0x14,0x00, + 0x14,0x00,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x15,0x00, + 0x93,0x10,0x52,0x04,0x15,0x00,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00, + 0x00,0x00,0xcf,0x86,0xe5,0x0f,0x06,0xe4,0xf8,0x03,0xe3,0x02,0x02,0xd2,0xfb,0xd1, + 0x4c,0xd0,0x06,0xcf,0x06,0x0c,0x00,0xcf,0x86,0xd5,0x2c,0xd4,0x1c,0xd3,0x10,0x52, + 0x04,0x0c,0x00,0x51,0x04,0x0c,0x00,0x10,0x04,0x0c,0x09,0x0c,0x00,0x52,0x04,0x0c, + 0x00,0x11,0x04,0x0c,0x00,0x00,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x0c, + 0x00,0x0c,0x00,0x0c,0x00,0x54,0x04,0x0c,0x00,0x53,0x04,0x00,0x00,0x52,0x04,0x00, + 0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x09,0xd0,0x69,0xcf,0x86,0xd5, + 0x32,0x54,0x04,0x0b,0x00,0x53,0x04,0x0b,0x00,0xd2,0x15,0x51,0x04,0x0b,0x00,0x10, + 0x0d,0x0b,0xff,0xf0,0x91,0x82,0x99,0xf0,0x91,0x82,0xba,0x00,0x0b,0x00,0x91,0x11, + 0x10,0x0d,0x0b,0xff,0xf0,0x91,0x82,0x9b,0xf0,0x91,0x82,0xba,0x00,0x0b,0x00,0x0b, + 0x00,0xd4,0x1d,0x53,0x04,0x0b,0x00,0x92,0x15,0x51,0x04,0x0b,0x00,0x10,0x04,0x0b, + 0x00,0x0b,0xff,0xf0,0x91,0x82,0xa5,0xf0,0x91,0x82,0xba,0x00,0x0b,0x00,0x53,0x04, + 0x0b,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x0b,0x00,0x0b,0x09,0x10,0x04,0x0b,0x07, + 0x0b,0x00,0x0b,0x00,0xcf,0x86,0xd5,0x20,0x94,0x1c,0xd3,0x0c,0x92,0x08,0x11,0x04, + 0x0b,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00, + 0x14,0x00,0x00,0x00,0x0d,0x00,0xd4,0x14,0x53,0x04,0x0d,0x00,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x0d,0x00,0x92,0x08, + 0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0xd1,0x96,0xd0,0x5c,0xcf,0x86,0xd5,0x18, + 0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x0d,0xe6,0x10,0x04,0x0d,0xe6,0x0d,0x00, + 0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd4,0x26,0x53,0x04,0x0d,0x00,0x52,0x04,0x0d,0x00, + 0x51,0x04,0x0d,0x00,0x10,0x0d,0x0d,0xff,0xf0,0x91,0x84,0xb1,0xf0,0x91,0x84,0xa7, + 
0x00,0x0d,0xff,0xf0,0x91,0x84,0xb2,0xf0,0x91,0x84,0xa7,0x00,0x93,0x18,0xd2,0x0c, + 0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x00,0x0d,0x09,0x91,0x08,0x10,0x04,0x0d,0x09, + 0x00,0x00,0x0d,0x00,0x0d,0x00,0xcf,0x86,0xd5,0x18,0x94,0x14,0x93,0x10,0x52,0x04, + 0x0d,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x10,0x00, + 0x54,0x04,0x10,0x00,0x93,0x18,0xd2,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00, + 0x10,0x07,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd0,0x06, + 0xcf,0x06,0x0d,0x00,0xcf,0x86,0xd5,0x40,0xd4,0x2c,0xd3,0x10,0x92,0x0c,0x91,0x08, + 0x10,0x04,0x0d,0x09,0x0d,0x00,0x0d,0x00,0x0d,0x00,0xd2,0x10,0xd1,0x08,0x10,0x04, + 0x0d,0x00,0x11,0x00,0x10,0x04,0x11,0x07,0x11,0x00,0x91,0x08,0x10,0x04,0x11,0x00, + 0x10,0x00,0x00,0x00,0x53,0x04,0x0d,0x00,0x92,0x0c,0x51,0x04,0x0d,0x00,0x10,0x04, + 0x10,0x00,0x11,0x00,0x11,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04, + 0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x93,0x10,0x52,0x04,0x10,0x00, + 0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd2,0xc8,0xd1,0x48, + 0xd0,0x42,0xcf,0x86,0xd5,0x18,0x54,0x04,0x10,0x00,0x93,0x10,0x92,0x0c,0x51,0x04, + 0x10,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x54,0x04,0x10,0x00, + 0xd3,0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0x09,0x10,0x04, + 0x10,0x07,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x12,0x00, + 0x00,0x00,0xcf,0x06,0x00,0x00,0xd0,0x52,0xcf,0x86,0xd5,0x3c,0xd4,0x28,0xd3,0x10, + 0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0xd2,0x0c, + 0x91,0x08,0x10,0x04,0x11,0x00,0x00,0x00,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04, + 0x00,0x00,0x11,0x00,0x53,0x04,0x11,0x00,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00, + 0x10,0x04,0x00,0x00,0x11,0x00,0x94,0x10,0x53,0x04,0x11,0x00,0x92,0x08,0x11,0x04, + 0x11,0x00,0x00,0x00,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x18, + 0x53,0x04,0x10,0x00,0x92,0x10,0xd1,0x08,0x10,0x04,0x10,0x00,0x10,0x07,0x10,0x04, + 0x10,0x09,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00, + 0x00,0x00,0x00,0x00,0xe1,0x27,0x01,0xd0,0x8a,0xcf,0x86,0xd5,0x44,0xd4,0x2c,0xd3, + 0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11,0x00,0x10,0x00,0x10,0x00,0x91,0x08,0x10, + 0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x52,0x04,0x10,0x00,0xd1,0x08,0x10,0x04,0x10, + 0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x93,0x14,0x92,0x10,0xd1,0x08,0x10, + 0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0xd4, + 0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10, + 0x00,0x10,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x10, + 0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd2,0x0c,0x51,0x04,0x10, + 0x00,0x10,0x04,0x00,0x00,0x14,0x07,0x91,0x08,0x10,0x04,0x10,0x07,0x10,0x00,0x10, + 0x00,0xcf,0x86,0xd5,0x6a,0xd4,0x42,0xd3,0x14,0x52,0x04,0x10,0x00,0xd1,0x08,0x10, + 0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0xd2,0x19,0xd1,0x08,0x10, + 0x04,0x10,0x00,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0xff,0xf0,0x91,0x8d,0x87,0xf0, + 0x91,0x8c,0xbe,0x00,0x91,0x11,0x10,0x0d,0x10,0xff,0xf0,0x91,0x8d,0x87,0xf0,0x91, + 0x8d,0x97,0x00,0x10,0x09,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x91,0x08,0x10,0x04,0x11, + 0x00,0x00,0x00,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x10,0x00,0x52, + 0x04,0x00,0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd4,0x1c,0xd3, + 0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x00,0x00,0x10,0xe6,0x52,0x04,0x10,0xe6,0x91, + 
0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x10,0xe6,0x91, + 0x08,0x10,0x04,0x10,0xe6,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe3, + 0x30,0x01,0xd2,0xb7,0xd1,0x48,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x95,0x3c, + 0xd4,0x1c,0x93,0x18,0xd2,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x09,0x12,0x00, + 0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x07,0x12,0x00,0x12,0x00,0x53,0x04,0x12,0x00, + 0xd2,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x00,0x00,0x12,0x00,0xd1,0x08,0x10,0x04, + 0x00,0x00,0x12,0x00,0x10,0x04,0x14,0xe6,0x15,0x00,0x00,0x00,0xd0,0x45,0xcf,0x86, + 0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0xd2,0x15,0x51,0x04, + 0x10,0x00,0x10,0x04,0x10,0x00,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0,0x91,0x92,0xba, + 0x00,0xd1,0x11,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0,0x91,0x92,0xb0,0x00, + 0x10,0x00,0x10,0x0d,0x10,0xff,0xf0,0x91,0x92,0xb9,0xf0,0x91,0x92,0xbd,0x00,0x10, + 0x00,0xcf,0x86,0x95,0x24,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10, + 0x04,0x10,0x09,0x10,0x07,0x10,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x08,0x11, + 0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0, + 0x40,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x0c,0x52,0x04,0x10, + 0x00,0x11,0x04,0x10,0x00,0x00,0x00,0xd2,0x1e,0x51,0x04,0x10,0x00,0x10,0x0d,0x10, + 0xff,0xf0,0x91,0x96,0xb8,0xf0,0x91,0x96,0xaf,0x00,0x10,0xff,0xf0,0x91,0x96,0xb9, + 0xf0,0x91,0x96,0xaf,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x10,0x09,0xcf, + 0x86,0x95,0x2c,0xd4,0x1c,0xd3,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x07,0x10, + 0x00,0x10,0x00,0x10,0x00,0x92,0x08,0x11,0x04,0x10,0x00,0x11,0x00,0x11,0x00,0x53, + 0x04,0x11,0x00,0x52,0x04,0x11,0x00,0x11,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0xd2, + 0xa0,0xd1,0x5c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x53, + 0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x10, + 0x09,0xcf,0x86,0xd5,0x24,0xd4,0x14,0x93,0x10,0x52,0x04,0x10,0x00,0x91,0x08,0x10, + 0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x08,0x11, + 0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x12,0x00,0x52,0x04,0x12, + 0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd0,0x2a,0xcf, + 0x86,0x55,0x04,0x0d,0x00,0x54,0x04,0x0d,0x00,0xd3,0x10,0x52,0x04,0x0d,0x00,0x51, + 0x04,0x0d,0x00,0x10,0x04,0x0d,0x09,0x0d,0x07,0x92,0x0c,0x91,0x08,0x10,0x04,0x15, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x95,0x14,0x94,0x10,0x53,0x04,0x0d, + 0x00,0x92,0x08,0x11,0x04,0x0d,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1, + 0x40,0xd0,0x3a,0xcf,0x86,0xd5,0x20,0x54,0x04,0x11,0x00,0x53,0x04,0x11,0x00,0xd2, + 0x0c,0x51,0x04,0x11,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x91,0x08,0x10,0x04,0x00, + 0x00,0x11,0x00,0x11,0x00,0x94,0x14,0x53,0x04,0x11,0x00,0x92,0x0c,0x51,0x04,0x11, + 0x00,0x10,0x04,0x11,0x00,0x11,0x09,0x00,0x00,0x11,0x00,0xcf,0x06,0x00,0x00,0xcf, + 0x06,0x00,0x00,0xe4,0x59,0x01,0xd3,0xb2,0xd2,0x5c,0xd1,0x28,0xd0,0x22,0xcf,0x86, + 0x55,0x04,0x14,0x00,0x54,0x04,0x14,0x00,0x53,0x04,0x14,0x00,0x92,0x10,0xd1,0x08, + 0x10,0x04,0x14,0x00,0x14,0x09,0x10,0x04,0x14,0x07,0x14,0x00,0x00,0x00,0xcf,0x06, + 0x00,0x00,0xd0,0x0a,0xcf,0x86,0x15,0x04,0x00,0x00,0x10,0x00,0xcf,0x86,0x55,0x04, + 0x10,0x00,0x54,0x04,0x10,0x00,0xd3,0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,0x04, + 0x10,0x00,0x00,0x00,0x00,0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04, + 0x00,0x00,0x10,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x1a,0xcf,0x86,0x55,0x04, + 
0x00,0x00,0x94,0x10,0x53,0x04,0x15,0x00,0x92,0x08,0x11,0x04,0x00,0x00,0x15,0x00, + 0x15,0x00,0x15,0x00,0xcf,0x86,0xd5,0x14,0x54,0x04,0x15,0x00,0x53,0x04,0x15,0x00, + 0x92,0x08,0x11,0x04,0x00,0x00,0x15,0x00,0x15,0x00,0x94,0x1c,0x93,0x18,0xd2,0x0c, + 0x91,0x08,0x10,0x04,0x15,0x09,0x15,0x00,0x15,0x00,0x91,0x08,0x10,0x04,0x15,0x00, + 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd2,0xa0,0xd1,0x3c,0xd0,0x1e,0xcf,0x86, + 0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x93,0x10,0x52,0x04,0x13,0x00,0x91,0x08, + 0x10,0x04,0x13,0x09,0x13,0x00,0x13,0x00,0x13,0x00,0xcf,0x86,0x95,0x18,0x94,0x14, + 0x93,0x10,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x13,0x09, + 0x00,0x00,0x13,0x00,0x13,0x00,0xd0,0x46,0xcf,0x86,0xd5,0x2c,0xd4,0x10,0x93,0x0c, + 0x52,0x04,0x13,0x00,0x11,0x04,0x15,0x00,0x13,0x00,0x13,0x00,0x53,0x04,0x13,0x00, + 0xd2,0x0c,0x91,0x08,0x10,0x04,0x13,0x00,0x13,0x09,0x13,0x00,0x91,0x08,0x10,0x04, + 0x13,0x00,0x14,0x00,0x13,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x13,0x00, + 0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0x55,0x04, + 0x10,0x00,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04, + 0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xe3,0xa9,0x01,0xd2, + 0xb0,0xd1,0x6c,0xd0,0x3e,0xcf,0x86,0xd5,0x18,0x94,0x14,0x53,0x04,0x12,0x00,0x92, + 0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x54, + 0x04,0x12,0x00,0xd3,0x10,0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12, + 0x00,0x00,0x00,0x52,0x04,0x12,0x00,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x12, + 0x09,0xcf,0x86,0xd5,0x14,0x94,0x10,0x93,0x0c,0x52,0x04,0x12,0x00,0x11,0x04,0x12, + 0x00,0x00,0x00,0x00,0x00,0x12,0x00,0x94,0x14,0x53,0x04,0x12,0x00,0x52,0x04,0x12, + 0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0x12,0x00,0xd0,0x3e,0xcf, + 0x86,0xd5,0x14,0x54,0x04,0x12,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x00,0x00,0x12, + 0x00,0x12,0x00,0x12,0x00,0xd4,0x14,0x53,0x04,0x12,0x00,0x92,0x0c,0x91,0x08,0x10, + 0x04,0x00,0x00,0x12,0x00,0x12,0x00,0x12,0x00,0x93,0x10,0x52,0x04,0x12,0x00,0x51, + 0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1, + 0xa0,0xd0,0x52,0xcf,0x86,0xd5,0x24,0x94,0x20,0xd3,0x10,0x52,0x04,0x13,0x00,0x51, + 0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x92,0x0c,0x51,0x04,0x13,0x00,0x10, + 0x04,0x00,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x54,0x04,0x13,0x00,0xd3,0x10,0x52, + 0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0xd2,0x0c,0x51, + 0x04,0x00,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x51,0x04,0x13,0x00,0x10,0x04,0x00, + 0x00,0x13,0x00,0xcf,0x86,0xd5,0x28,0xd4,0x18,0x93,0x14,0xd2,0x0c,0x51,0x04,0x13, + 0x00,0x10,0x04,0x13,0x07,0x13,0x00,0x11,0x04,0x13,0x09,0x13,0x00,0x00,0x00,0x53, + 0x04,0x13,0x00,0x92,0x08,0x11,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0x94,0x20,0xd3, + 0x10,0x52,0x04,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x92, + 0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x14,0x00,0x14,0x00,0x14,0x00,0xd0, + 0x52,0xcf,0x86,0xd5,0x3c,0xd4,0x14,0x53,0x04,0x14,0x00,0x52,0x04,0x14,0x00,0x51, + 0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x14, + 0x00,0x10,0x04,0x00,0x00,0x14,0x00,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x14, + 0x09,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x94, + 0x10,0x53,0x04,0x14,0x00,0x92,0x08,0x11,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00, + 0x00,0xcf,0x06,0x00,0x00,0xd2,0x2a,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf, + 
0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x14,0x00,0x53,0x04,0x14, + 0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xd1, + 0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x15, + 0x00,0x54,0x04,0x15,0x00,0xd3,0x0c,0x92,0x08,0x11,0x04,0x15,0x00,0x00,0x00,0x00, + 0x00,0x52,0x04,0x00,0x00,0x51,0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x15,0x00,0xd0, + 0xca,0xcf,0x86,0xd5,0xc2,0xd4,0x54,0xd3,0x06,0xcf,0x06,0x09,0x00,0xd2,0x06,0xcf, + 0x06,0x09,0x00,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x09,0x00,0xcf,0x86,0x55,0x04,0x09, + 0x00,0x94,0x14,0x53,0x04,0x09,0x00,0x52,0x04,0x09,0x00,0x51,0x04,0x09,0x00,0x10, + 0x04,0x09,0x00,0x10,0x00,0x10,0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x10, + 0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x11,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x68,0xd2,0x46,0xd1,0x40,0xd0, + 0x06,0xcf,0x06,0x09,0x00,0xcf,0x86,0x55,0x04,0x09,0x00,0xd4,0x20,0xd3,0x10,0x92, + 0x0c,0x51,0x04,0x09,0x00,0x10,0x04,0x09,0x00,0x10,0x00,0x10,0x00,0x52,0x04,0x10, + 0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x93,0x10,0x52,0x04,0x09, + 0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x11, + 0x00,0xd1,0x1c,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,0x95,0x10,0x94,0x0c,0x93, + 0x08,0x12,0x04,0x11,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00, + 0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x4c,0xd4,0x06,0xcf, + 0x06,0x0b,0x00,0xd3,0x40,0xd2,0x3a,0xd1,0x34,0xd0,0x2e,0xcf,0x86,0x55,0x04,0x0b, + 0x00,0xd4,0x14,0x53,0x04,0x0b,0x00,0x52,0x04,0x0b,0x00,0x51,0x04,0x0b,0x00,0x10, + 0x04,0x0b,0x00,0x00,0x00,0x53,0x04,0x15,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x15, 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf, - 0x86,0xcf,0x06,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5, - 0xa2,0xd4,0x9c,0xd3,0x74,0xd2,0x26,0xd1,0x20,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x94, - 0x10,0x93,0x0c,0x92,0x08,0x11,0x04,0x0c,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x13, - 0x00,0x13,0x00,0xcf,0x06,0x13,0x00,0xcf,0x06,0x13,0x00,0xd1,0x48,0xd0,0x1e,0xcf, - 0x86,0x95,0x18,0x54,0x04,0x13,0x00,0x53,0x04,0x13,0x00,0x52,0x04,0x13,0x00,0x51, - 0x04,0x13,0x00,0x10,0x04,0x13,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x18,0x54, - 0x04,0x00,0x00,0x93,0x10,0x92,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00, - 0x00,0x00,0x00,0x00,0x00,0x94,0x0c,0x93,0x08,0x12,0x04,0x00,0x00,0x15,0x00,0x00, - 0x00,0x13,0x00,0xcf,0x06,0x13,0x00,0xd2,0x22,0xd1,0x06,0xcf,0x06,0x13,0x00,0xd0, - 0x06,0xcf,0x06,0x13,0x00,0xcf,0x86,0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x53, - 0x04,0x13,0x00,0x12,0x04,0x13,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00, - 0x00,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x7e,0xd2,0x78,0xd1,0x34,0xd0,0x06,0xcf, - 0x06,0x10,0x00,0xcf,0x86,0x55,0x04,0x10,0x00,0xd4,0x14,0x53,0x04,0x10,0x00,0x92, - 0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10, - 0x00,0x52,0x04,0x10,0x00,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd0, - 0x3e,0xcf,0x86,0xd5,0x2c,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10, - 0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0xd2,0x08,0x11, - 0x04,0x10,0x00,0x00,0x00,0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x01,0x10,0x00,0x94, - 0x0c,0x93,0x08,0x12,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00, - 0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xe1,0x92,0x04,0xd0,0x08,0xcf,0x86, - 
0xcf,0x06,0x00,0x00,0xcf,0x86,0xe5,0x2f,0x04,0xe4,0x7f,0x02,0xe3,0xf4,0x01,0xd2, - 0x26,0xd1,0x06,0xcf,0x06,0x05,0x00,0xd0,0x06,0xcf,0x06,0x05,0x00,0xcf,0x86,0x55, - 0x04,0x05,0x00,0x54,0x04,0x05,0x00,0x93,0x0c,0x52,0x04,0x05,0x00,0x11,0x04,0x05, - 0x00,0x00,0x00,0x00,0x00,0xd1,0xeb,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x05,0x00,0x94, - 0x20,0xd3,0x10,0x52,0x04,0x05,0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x00, - 0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x05,0x00,0x05,0x00,0x05, - 0x00,0xcf,0x86,0xd5,0x2a,0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05, - 0x00,0x51,0x04,0x05,0x00,0x10,0x0d,0x05,0xff,0xf0,0x9d,0x85,0x97,0xf0,0x9d,0x85, - 0xa5,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0x00,0xd4,0x75,0xd3, - 0x61,0xd2,0x44,0xd1,0x22,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85, - 0xa5,0xf0,0x9d,0x85,0xae,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5, - 0xf0,0x9d,0x85,0xaf,0x00,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85, - 0xa5,0xf0,0x9d,0x85,0xb0,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5, - 0xf0,0x9d,0x85,0xb1,0x00,0xd1,0x15,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0, - 0x9d,0x85,0xa5,0xf0,0x9d,0x85,0xb2,0x00,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0x01, - 0xd2,0x08,0x11,0x04,0x05,0x01,0x05,0x00,0x91,0x08,0x10,0x04,0x05,0x00,0x05,0xe2, - 0x05,0xd8,0xd3,0x10,0x92,0x0c,0x51,0x04,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0x00, - 0x05,0x00,0x92,0x0c,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x05,0xdc,0x05,0xdc, + 0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xd1,0x4c,0xd0,0x44,0xcf, + 0x86,0xd5,0x3c,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf,0x06,0x11,0x00,0xd2, + 0x2a,0xd1,0x24,0xd0,0x06,0xcf,0x06,0x11,0x00,0xcf,0x86,0x95,0x18,0x94,0x14,0x93, + 0x10,0x52,0x04,0x11,0x00,0x51,0x04,0x11,0x00,0x10,0x04,0x11,0x00,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00, + 0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xe0,0xd2,0x01,0xcf,0x86,0xd5,0x06,0xcf,0x06, + 0x00,0x00,0xe4,0x0b,0x01,0xd3,0x06,0xcf,0x06,0x0c,0x00,0xd2,0x84,0xd1,0x50,0xd0, + 0x1e,0xcf,0x86,0x55,0x04,0x0c,0x00,0x54,0x04,0x0c,0x00,0x53,0x04,0x0c,0x00,0x92, + 0x0c,0x91,0x08,0x10,0x04,0x0c,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5, + 0x18,0x54,0x04,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x51,0x04,0x10, + 0x00,0x10,0x04,0x10,0x00,0x00,0x00,0x94,0x14,0x53,0x04,0x10,0x00,0xd2,0x08,0x11, + 0x04,0x10,0x00,0x00,0x00,0x11,0x04,0x00,0x00,0x10,0x00,0x00,0x00,0xd0,0x06,0xcf, + 0x06,0x00,0x00,0xcf,0x86,0xd5,0x08,0x14,0x04,0x00,0x00,0x10,0x00,0xd4,0x10,0x53, + 0x04,0x10,0x00,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00,0x00,0x93,0x10,0x52, + 0x04,0x10,0x01,0x91,0x08,0x10,0x04,0x10,0x01,0x10,0x00,0x00,0x00,0x00,0x00,0xd1, + 0x6c,0xd0,0x1e,0xcf,0x86,0x55,0x04,0x10,0x00,0x54,0x04,0x10,0x00,0x93,0x10,0x52, + 0x04,0x10,0xe6,0x51,0x04,0x10,0xe6,0x10,0x04,0x10,0xe6,0x10,0x00,0x10,0x00,0xcf, + 0x86,0xd5,0x24,0xd4,0x10,0x93,0x0c,0x52,0x04,0x10,0x00,0x11,0x04,0x10,0x00,0x00, + 0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x92,0x0c,0x51,0x04,0x10,0x00,0x10,0x04,0x00, + 0x00,0x10,0x00,0x10,0x00,0xd4,0x14,0x93,0x10,0x92,0x0c,0x51,0x04,0x10,0x00,0x10, + 0x04,0x00,0x00,0x10,0x00,0x10,0x00,0x10,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x00, + 0x00,0x91,0x08,0x10,0x04,0x00,0x00,0x10,0x00,0x10,0x00,0xd0,0x0e,0xcf,0x86,0x95, + 0x08,0x14,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf, + 0x06,0x00,0x00,0xd2,0x30,0xd1,0x0c,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x06,0x14, + 
0x00,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04,0x14,0x00,0x53,0x04,0x14,0x00,0x92, + 0x0c,0x51,0x04,0x14,0x00,0x10,0x04,0x14,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf, + 0x06,0x00,0x00,0xd1,0x4c,0xd0,0x06,0xcf,0x06,0x0d,0x00,0xcf,0x86,0xd5,0x2c,0x94, + 0x28,0xd3,0x10,0x52,0x04,0x0d,0x00,0x91,0x08,0x10,0x04,0x0d,0x00,0x15,0x00,0x15, + 0x00,0xd2,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00,0x51,0x04,0x00, + 0x00,0x10,0x04,0x00,0x00,0x15,0x00,0x0d,0x00,0x54,0x04,0x0d,0x00,0x53,0x04,0x0d, + 0x00,0x52,0x04,0x0d,0x00,0x51,0x04,0x0d,0x00,0x10,0x04,0x0d,0x00,0x15,0x00,0xd0, + 0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x53,0x04,0x15,0x00,0x52,0x04,0x00,0x00,0x51, + 0x04,0x00,0x00,0x10,0x04,0x00,0x00,0x0d,0x00,0x0d,0x00,0x00,0x00,0xcf,0x86,0x55, + 0x04,0x00,0x00,0x94,0x14,0x93,0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x12,0x00,0x13, + 0x00,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xcf,0x06,0x12,0x00,0xe2, + 0xc6,0x01,0xd1,0x8e,0xd0,0x86,0xcf,0x86,0xd5,0x48,0xd4,0x06,0xcf,0x06,0x12,0x00, + 0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x06,0xcf,0x06,0x12,0x00,0xd1,0x06,0xcf,0x06, + 0x12,0x00,0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55,0x04,0x12,0x00,0xd4,0x14, + 0x53,0x04,0x12,0x00,0x52,0x04,0x12,0x00,0x91,0x08,0x10,0x04,0x12,0x00,0x14,0x00, + 0x14,0x00,0x93,0x0c,0x92,0x08,0x11,0x04,0x14,0x00,0x15,0x00,0x15,0x00,0x00,0x00, + 0xd4,0x36,0xd3,0x06,0xcf,0x06,0x12,0x00,0xd2,0x2a,0xd1,0x06,0xcf,0x06,0x12,0x00, + 0xd0,0x06,0xcf,0x06,0x12,0x00,0xcf,0x86,0x55,0x04,0x12,0x00,0x54,0x04,0x12,0x00, + 0x93,0x10,0x92,0x0c,0x51,0x04,0x12,0x00,0x10,0x04,0x12,0x00,0x00,0x00,0x00,0x00, + 0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00, + 0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0xa2,0xd4,0x9c,0xd3,0x74, + 0xd2,0x26,0xd1,0x20,0xd0,0x1a,0xcf,0x86,0x95,0x14,0x94,0x10,0x93,0x0c,0x92,0x08, + 0x11,0x04,0x0c,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0x13,0x00,0xcf,0x06, + 0x13,0x00,0xcf,0x06,0x13,0x00,0xd1,0x48,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x54,0x04, + 0x13,0x00,0x53,0x04,0x13,0x00,0x52,0x04,0x13,0x00,0x51,0x04,0x13,0x00,0x10,0x04, + 0x13,0x00,0x00,0x00,0x00,0x00,0xcf,0x86,0xd5,0x18,0x54,0x04,0x00,0x00,0x93,0x10, + 0x92,0x0c,0x51,0x04,0x15,0x00,0x10,0x04,0x15,0x00,0x00,0x00,0x00,0x00,0x00,0x00, + 0x94,0x0c,0x93,0x08,0x12,0x04,0x00,0x00,0x15,0x00,0x00,0x00,0x13,0x00,0xcf,0x06, + 0x13,0x00,0xd2,0x22,0xd1,0x06,0xcf,0x06,0x13,0x00,0xd0,0x06,0xcf,0x06,0x13,0x00, + 0xcf,0x86,0x55,0x04,0x13,0x00,0x54,0x04,0x13,0x00,0x53,0x04,0x13,0x00,0x12,0x04, + 0x13,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xd4,0x06,0xcf,0x06, + 0x00,0x00,0xd3,0x7f,0xd2,0x79,0xd1,0x34,0xd0,0x06,0xcf,0x06,0x10,0x00,0xcf,0x86, + 0x55,0x04,0x10,0x00,0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x51,0x04,0x10,0x00, + 0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0x52,0x04,0x10,0x00, + 0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00,0x00,0x00,0xd0,0x3f,0xcf,0x86,0xd5,0x2c, + 0xd4,0x14,0x53,0x04,0x10,0x00,0x92,0x0c,0x91,0x08,0x10,0x04,0x10,0x00,0x00,0x00, + 0x00,0x00,0x00,0x00,0x53,0x04,0x10,0x00,0xd2,0x08,0x11,0x04,0x10,0x00,0x00,0x00, + 0x51,0x04,0x10,0x00,0x10,0x04,0x10,0x01,0x10,0x00,0x94,0x0d,0x93,0x09,0x12,0x05, + 0x10,0xff,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00, + 0x00,0xcf,0x06,0x00,0x00,0xe1,0x96,0x04,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00, + 0xcf,0x86,0xe5,0x33,0x04,0xe4,0x83,0x02,0xe3,0xf8,0x01,0xd2,0x26,0xd1,0x06,0xcf, + 0x06,0x05,0x00,0xd0,0x06,0xcf,0x06,0x05,0x00,0xcf,0x86,0x55,0x04,0x05,0x00,0x54, + 
0x04,0x05,0x00,0x93,0x0c,0x52,0x04,0x05,0x00,0x11,0x04,0x05,0x00,0x00,0x00,0x00, + 0x00,0xd1,0xef,0xd0,0x2a,0xcf,0x86,0x55,0x04,0x05,0x00,0x94,0x20,0xd3,0x10,0x52, + 0x04,0x05,0x00,0x51,0x04,0x05,0x00,0x10,0x04,0x05,0x00,0x00,0x00,0x92,0x0c,0x91, + 0x08,0x10,0x04,0x00,0x00,0x0a,0x00,0x05,0x00,0x05,0x00,0x05,0x00,0xcf,0x86,0xd5, + 0x2a,0x54,0x04,0x05,0x00,0x53,0x04,0x05,0x00,0x52,0x04,0x05,0x00,0x51,0x04,0x05, + 0x00,0x10,0x0d,0x05,0xff,0xf0,0x9d,0x85,0x97,0xf0,0x9d,0x85,0xa5,0x00,0x05,0xff, + 0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0x00,0xd4,0x75,0xd3,0x61,0xd2,0x44,0xd1, + 0x22,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85, + 0xae,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85,0xaf, + 0x00,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85, + 0xb0,0x00,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0,0x9d,0x85,0xb1, + 0x00,0xd1,0x15,0x10,0x11,0x05,0xff,0xf0,0x9d,0x85,0x98,0xf0,0x9d,0x85,0xa5,0xf0, + 0x9d,0x85,0xb2,0x00,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0x01,0xd2,0x08,0x11,0x04, + 0x05,0x01,0x05,0x00,0x91,0x08,0x10,0x04,0x05,0x00,0x05,0xe2,0x05,0xd8,0xd3,0x12, + 0x92,0x0d,0x51,0x04,0x05,0xd8,0x10,0x04,0x05,0xd8,0x05,0xff,0x00,0x05,0xff,0x00, + 0x92,0x0e,0x51,0x05,0x05,0xff,0x00,0x10,0x05,0x05,0xff,0x00,0x05,0xdc,0x05,0xdc, 0xd0,0x97,0xcf,0x86,0xd5,0x28,0x94,0x24,0xd3,0x18,0xd2,0x0c,0x51,0x04,0x05,0xdc, 0x10,0x04,0x05,0xdc,0x05,0x00,0x91,0x08,0x10,0x04,0x05,0x00,0x05,0xe6,0x05,0xe6, 0x92,0x08,0x11,0x04,0x05,0xe6,0x05,0xdc,0x05,0x00,0x05,0x00,0xd4,0x14,0x53,0x04, @@ -4080,20 +4090,21 @@ static const unsigned char utf8data[64080] = { 0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xd2,0x06,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf, 0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54, 0x04,0x00,0x00,0x53,0x04,0x00,0x00,0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x02, - 0x00,0xd4,0xc8,0xd3,0x70,0xd2,0x68,0xd1,0x60,0xd0,0x58,0xcf,0x86,0xd5,0x50,0xd4, - 0x4a,0xd3,0x44,0xd2,0x2a,0xd1,0x24,0xd0,0x1e,0xcf,0x86,0x95,0x18,0x94,0x14,0x93, - 0x10,0x92,0x0c,0x91,0x08,0x10,0x04,0x00,0x00,0x05,0x00,0x00,0x00,0x00,0x00,0x00, - 0x00,0x00,0x00,0x05,0x00,0xcf,0x06,0x05,0x00,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf, - 0x06,0x07,0x00,0xd0,0x06,0xcf,0x06,0x07,0x00,0xcf,0x86,0x55,0x04,0x07,0x00,0x14, - 0x04,0x07,0x00,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00,0x00,0xcf,0x06,0x00, - 0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf, - 0x06,0x00,0x00,0xd2,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xd1,0x08,0xcf,0x86,0xcf, - 0x06,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x06,0xcf, - 0x06,0x00,0x00,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00,0xd2, - 0x06,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06,0x00, - 0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x00,0x00,0x52, - 0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x02,0x00,0xcf,0x86,0xcf,0x06,0x02,0x00,0x81, - 0x80,0xcf,0x86,0x85,0x84,0xcf,0x86,0xcf,0x06,0x02,0x00,0x00,0x00,0x00,0x00,0x00 + 0x00,0xd4,0xd9,0xd3,0x81,0xd2,0x79,0xd1,0x71,0xd0,0x69,0xcf,0x86,0xd5,0x60,0xd4, + 0x59,0xd3,0x52,0xd2,0x33,0xd1,0x2c,0xd0,0x25,0xcf,0x86,0x95,0x1e,0x94,0x19,0x93, + 0x14,0x92,0x0f,0x91,0x0a,0x10,0x05,0x00,0xff,0x00,0x05,0xff,0x00,0x00,0xff,0x00, + 0x00,0xff,0x00,0x00,0xff,0x00,0x00,0xff,0x00,0x05,0xff,0x00,0xcf,0x06,0x05,0xff, + 0x00,0xcf,0x06,0x00,0xff,0x00,0xd1,0x07,0xcf,0x06,0x07,0xff,0x00,0xd0,0x07,0xcf, + 0x06,0x07,0xff,0x00,0xcf,0x86,0x55,0x05,0x07,0xff,0x00,0x14,0x05,0x07,0xff,0x00, + 
0x00,0xff,0x00,0xcf,0x06,0x00,0xff,0x00,0xcf,0x06,0x00,0xff,0x00,0xcf,0x06,0x00, + 0xff,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86, + 0xcf,0x06,0x00,0x00,0xd2,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xd1,0x08,0xcf,0x86, + 0xcf,0x06,0x00,0x00,0xd0,0x08,0xcf,0x86,0xcf,0x06,0x00,0x00,0xcf,0x86,0xd5,0x06, + 0xcf,0x06,0x00,0x00,0xd4,0x06,0xcf,0x06,0x00,0x00,0xd3,0x06,0xcf,0x06,0x00,0x00, + 0xd2,0x06,0xcf,0x06,0x00,0x00,0xd1,0x06,0xcf,0x06,0x00,0x00,0xd0,0x06,0xcf,0x06, + 0x00,0x00,0xcf,0x86,0x55,0x04,0x00,0x00,0x54,0x04,0x00,0x00,0x53,0x04,0x00,0x00, + 0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x02,0x00,0xcf,0x86,0xcf,0x06,0x02,0x00, + 0x81,0x80,0xcf,0x86,0x85,0x84,0xcf,0x86,0xcf,0x06,0x02,0x00,0x00,0x00,0x00,0x00 }; struct utf8data_table utf8_data_table = { diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c index 74952e58cca0..48f33d4994dc 100644 --- a/fs/vboxsf/super.c +++ b/fs/vboxsf/super.c @@ -21,7 +21,8 @@ #define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */ -static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375"; +static const unsigned char VBSF_MOUNT_SIGNATURE[4] = { '\000', '\377', '\376', + '\375' }; static int follow_symlinks; module_param(follow_symlinks, int, 0444); diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c index bf47efe08a58..9743fa5b5388 100644 --- a/fs/xfs/libxfs/xfs_ag.c +++ b/fs/xfs/libxfs/xfs_ag.c @@ -259,6 +259,30 @@ xfs_agino_range( return __xfs_agino_range(mp, xfs_ag_block_count(mp, agno), first, last); } +/* + * Free perag within the specified AG range, it is only used to free unused + * perags under the error handling path. + */ +void +xfs_free_unused_perag_range( + struct xfs_mount *mp, + xfs_agnumber_t agstart, + xfs_agnumber_t agend) +{ + struct xfs_perag *pag; + xfs_agnumber_t index; + + for (index = agstart; index < agend; index++) { + spin_lock(&mp->m_perag_lock); + pag = radix_tree_delete(&mp->m_perag_tree, index); + spin_unlock(&mp->m_perag_lock); + if (!pag) + break; + xfs_buf_hash_destroy(pag); + kmem_free(pag); + } +} + int xfs_initialize_perag( struct xfs_mount *mp, @@ -345,18 +369,14 @@ xfs_initialize_perag( return 0; out_remove_pag: + spin_lock(&mp->m_perag_lock); radix_tree_delete(&mp->m_perag_tree, index); + spin_unlock(&mp->m_perag_lock); out_free_pag: kmem_free(pag); out_unwind_new_pags: /* unwind any prior newly initialized pags */ - for (index = first_initialised; index < agcount; index++) { - pag = radix_tree_delete(&mp->m_perag_tree, index); - if (!pag) - break; - xfs_buf_hash_destroy(pag); - kmem_free(pag); - } + xfs_free_unused_perag_range(mp, first_initialised, agcount); return error; } @@ -906,7 +926,10 @@ xfs_ag_shrink_space( if (err2 != -ENOSPC) goto resv_err; - __xfs_free_extent_later(*tpp, args.fsbno, delta, NULL, true); + err2 = __xfs_free_extent_later(*tpp, args.fsbno, delta, NULL, + XFS_AG_RESV_NONE, true); + if (err2) + goto resv_err; /* * Roll the transaction before trying to re-init the per-ag @@ -981,10 +1004,8 @@ xfs_ag_extend_space( if (error) return error; - error = xfs_free_extent(tp, XFS_AGB_TO_FSB(pag->pag_mount, pag->pag_agno, - be32_to_cpu(agf->agf_length) - len), - len, &XFS_RMAP_OINFO_SKIP_UPDATE, - XFS_AG_RESV_NONE); + error = xfs_free_extent(tp, pag, be32_to_cpu(agf->agf_length) - len, + len, &XFS_RMAP_OINFO_SKIP_UPDATE, XFS_AG_RESV_NONE); if (error) return error; diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h index 191b22b9a35b..eb84af1c8628 100644 --- a/fs/xfs/libxfs/xfs_ag.h +++ b/fs/xfs/libxfs/xfs_ag.h @@ -106,6 +106,9 @@ struct 
xfs_perag { #endif /* __KERNEL__ */ }; + +void xfs_free_unused_perag_range(struct xfs_mount *mp, xfs_agnumber_t agstart, + xfs_agnumber_t agend); int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount, xfs_rfsblock_t dcount, xfs_agnumber_t *maxagi); int xfs_initialize_perag_data(struct xfs_mount *mp, xfs_agnumber_t agno); diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 8bb024b06b95..c08265f19136 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -2273,16 +2273,37 @@ xfs_alloc_min_freelist( ASSERT(mp->m_alloc_maxlevels > 0); + /* + * For a btree shorter than the maximum height, the worst case is that + * every level gets split and a new level is added, then while inserting + * another entry to refill the AGFL, every level under the old root gets + * split again. This is: + * + * (full height split reservation) + (AGFL refill split height) + * = (current height + 1) + (current height - 1) + * = (new height) + (new height - 2) + * = 2 * new height - 2 + * + * For a btree of maximum height, the worst case is that every level + * under the root gets split, then while inserting another entry to + * refill the AGFL, every level under the root gets split again. This is + * also: + * + * 2 * (current height - 1) + * = 2 * (new height - 1) + * = 2 * new height - 2 + */ + /* space needed by-bno freespace btree */ min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1, - mp->m_alloc_maxlevels); + mp->m_alloc_maxlevels) * 2 - 2; /* space needed by-size freespace btree */ min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1, - mp->m_alloc_maxlevels); + mp->m_alloc_maxlevels) * 2 - 2; /* space needed reverse mapping used space btree */ if (xfs_has_rmapbt(mp)) min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1, - mp->m_rmap_maxlevels); + mp->m_rmap_maxlevels) * 2 - 2; return min_free; } @@ -2464,45 +2485,53 @@ xfs_agfl_reset( * the real allocation can proceed. Deferring the free disconnects freeing up * the AGFL slot from freeing the block. */ -STATIC void +static int xfs_defer_agfl_block( struct xfs_trans *tp, xfs_agnumber_t agno, - xfs_fsblock_t agbno, + xfs_agblock_t agbno, struct xfs_owner_info *oinfo) { struct xfs_mount *mp = tp->t_mountp; - struct xfs_extent_free_item *new; /* new element */ + struct xfs_extent_free_item *xefi; + xfs_fsblock_t fsbno = XFS_AGB_TO_FSB(mp, agno, agbno); ASSERT(xfs_extfree_item_cache != NULL); ASSERT(oinfo != NULL); - new = kmem_cache_zalloc(xfs_extfree_item_cache, + if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, fsbno))) + return -EFSCORRUPTED; + + xefi = kmem_cache_zalloc(xfs_extfree_item_cache, GFP_KERNEL | __GFP_NOFAIL); - new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno); - new->xefi_blockcount = 1; - new->xefi_owner = oinfo->oi_owner; + xefi->xefi_startblock = fsbno; + xefi->xefi_blockcount = 1; + xefi->xefi_owner = oinfo->oi_owner; + xefi->xefi_agresv = XFS_AG_RESV_AGFL; trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1); - xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list); + xfs_extent_free_get_group(mp, xefi); + xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &xefi->xefi_list); + return 0; } /* * Add the extent to the list of extents to be free at transaction end. * The list is maintained sorted (by block number). 
*/ -void +int __xfs_free_extent_later( struct xfs_trans *tp, xfs_fsblock_t bno, xfs_filblks_t len, const struct xfs_owner_info *oinfo, + enum xfs_ag_resv_type type, bool skip_discard) { - struct xfs_extent_free_item *new; /* new element */ -#ifdef DEBUG + struct xfs_extent_free_item *xefi; struct xfs_mount *mp = tp->t_mountp; +#ifdef DEBUG xfs_agnumber_t agno; xfs_agblock_t agbno; @@ -2518,28 +2547,36 @@ __xfs_free_extent_later( ASSERT(agbno + len <= mp->m_sb.sb_agblocks); #endif ASSERT(xfs_extfree_item_cache != NULL); + ASSERT(type != XFS_AG_RESV_AGFL); - new = kmem_cache_zalloc(xfs_extfree_item_cache, + if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len))) + return -EFSCORRUPTED; + + xefi = kmem_cache_zalloc(xfs_extfree_item_cache, GFP_KERNEL | __GFP_NOFAIL); - new->xefi_startblock = bno; - new->xefi_blockcount = (xfs_extlen_t)len; + xefi->xefi_startblock = bno; + xefi->xefi_blockcount = (xfs_extlen_t)len; + xefi->xefi_agresv = type; if (skip_discard) - new->xefi_flags |= XFS_EFI_SKIP_DISCARD; + xefi->xefi_flags |= XFS_EFI_SKIP_DISCARD; if (oinfo) { ASSERT(oinfo->oi_offset == 0); if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK) - new->xefi_flags |= XFS_EFI_ATTR_FORK; + xefi->xefi_flags |= XFS_EFI_ATTR_FORK; if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK) - new->xefi_flags |= XFS_EFI_BMBT_BLOCK; - new->xefi_owner = oinfo->oi_owner; + xefi->xefi_flags |= XFS_EFI_BMBT_BLOCK; + xefi->xefi_owner = oinfo->oi_owner; } else { - new->xefi_owner = XFS_RMAP_OWN_NULL; + xefi->xefi_owner = XFS_RMAP_OWN_NULL; } - trace_xfs_bmap_free_defer(tp->t_mountp, + trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0, XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len); - xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list); + + xfs_extent_free_get_group(mp, xefi); + xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &xefi->xefi_list); + return 0; } #ifdef DEBUG @@ -2699,7 +2736,9 @@ xfs_alloc_fix_freelist( goto out_agbp_relse; /* defer agfl frees */ - xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo); + error = xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo); + if (error) + goto out_agbp_relse; } targs.tp = tp; @@ -3426,7 +3465,8 @@ xfs_free_extent_fix_freelist( int __xfs_free_extent( struct xfs_trans *tp, - xfs_fsblock_t bno, + struct xfs_perag *pag, + xfs_agblock_t agbno, xfs_extlen_t len, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type, @@ -3434,12 +3474,9 @@ __xfs_free_extent( { struct xfs_mount *mp = tp->t_mountp; struct xfs_buf *agbp; - xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno); - xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno); struct xfs_agf *agf; int error; unsigned int busy_flags = 0; - struct xfs_perag *pag; ASSERT(len != 0); ASSERT(type != XFS_AG_RESV_AGFL); @@ -3448,10 +3485,9 @@ __xfs_free_extent( XFS_ERRTAG_FREE_EXTENT)) return -EIO; - pag = xfs_perag_get(mp, agno); error = xfs_free_extent_fix_freelist(tp, pag, &agbp); if (error) - goto err; + return error; agf = agbp->b_addr; if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) { @@ -3465,20 +3501,18 @@ __xfs_free_extent( goto err_release; } - error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type); + error = xfs_free_ag_extent(tp, agbp, pag->pag_agno, agbno, len, oinfo, + type); if (error) goto err_release; if (skip_discard) busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD; xfs_extent_busy_insert(tp, pag, agbno, len, busy_flags); - xfs_perag_put(pag); return 0; err_release: xfs_trans_brelse(tp, agbp); -err: - xfs_perag_put(pag); return error; } diff --git a/fs/xfs/libxfs/xfs_alloc.h 
b/fs/xfs/libxfs/xfs_alloc.h index 2c3f762dfb58..2dd93d62150f 100644 --- a/fs/xfs/libxfs/xfs_alloc.h +++ b/fs/xfs/libxfs/xfs_alloc.h @@ -130,7 +130,8 @@ xfs_alloc_vextent( int /* error */ __xfs_free_extent( struct xfs_trans *tp, /* transaction pointer */ - xfs_fsblock_t bno, /* starting block number of extent */ + struct xfs_perag *pag, + xfs_agblock_t agbno, xfs_extlen_t len, /* length of extent */ const struct xfs_owner_info *oinfo, /* extent owner */ enum xfs_ag_resv_type type, /* block reservation type */ @@ -139,12 +140,13 @@ __xfs_free_extent( static inline int xfs_free_extent( struct xfs_trans *tp, - xfs_fsblock_t bno, + struct xfs_perag *pag, + xfs_agblock_t agbno, xfs_extlen_t len, const struct xfs_owner_info *oinfo, enum xfs_ag_resv_type type) { - return __xfs_free_extent(tp, bno, len, oinfo, type, false); + return __xfs_free_extent(tp, pag, agbno, len, oinfo, type, false); } int /* error */ @@ -211,9 +213,9 @@ xfs_buf_to_agfl_bno( return bp->b_addr; } -void __xfs_free_extent_later(struct xfs_trans *tp, xfs_fsblock_t bno, +int __xfs_free_extent_later(struct xfs_trans *tp, xfs_fsblock_t bno, xfs_filblks_t len, const struct xfs_owner_info *oinfo, - bool skip_discard); + enum xfs_ag_resv_type type, bool skip_discard); /* * List of extents to be free "later". @@ -224,21 +226,27 @@ struct xfs_extent_free_item { uint64_t xefi_owner; xfs_fsblock_t xefi_startblock;/* starting fs block number */ xfs_extlen_t xefi_blockcount;/* number of blocks in extent */ + struct xfs_perag *xefi_pag; unsigned int xefi_flags; + enum xfs_ag_resv_type xefi_agresv; }; +void xfs_extent_free_get_group(struct xfs_mount *mp, + struct xfs_extent_free_item *xefi); + #define XFS_EFI_SKIP_DISCARD (1U << 0) /* don't issue discard */ #define XFS_EFI_ATTR_FORK (1U << 1) /* freeing attr fork block */ #define XFS_EFI_BMBT_BLOCK (1U << 2) /* freeing bmap btree block */ -static inline void +static inline int xfs_free_extent_later( struct xfs_trans *tp, xfs_fsblock_t bno, xfs_filblks_t len, - const struct xfs_owner_info *oinfo) + const struct xfs_owner_info *oinfo, + enum xfs_ag_resv_type type) { - __xfs_free_extent_later(tp, bno, len, oinfo, false); + return __xfs_free_extent_later(tp, bno, len, oinfo, type, false); } diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index e28d93d232de..32d350e97e0f 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -421,10 +421,10 @@ xfs_attr_complete_op( bool do_replace = args->op_flags & XFS_DA_OP_REPLACE; args->op_flags &= ~XFS_DA_OP_REPLACE; - if (do_replace) { - args->attr_filter &= ~XFS_ATTR_INCOMPLETE; + args->attr_filter &= ~XFS_ATTR_INCOMPLETE; + if (do_replace) return replace_state; - } + return XFS_DAS_DONE; } diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c index d440393b40eb..54de405cbab5 100644 --- a/fs/xfs/libxfs/xfs_attr_remote.c +++ b/fs/xfs/libxfs/xfs_attr_remote.c @@ -619,7 +619,6 @@ xfs_attr_rmtval_set_blk( if (error) return error; - ASSERT(nmap == 1); ASSERT((map->br_startblock != DELAYSTARTBLOCK) && (map->br_startblock != HOLESTARTBLOCK)); diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 9dc33cdc2ab9..14b0d230f61b 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -21,7 +21,7 @@ #include "xfs_bmap.h" #include "xfs_bmap_util.h" #include "xfs_bmap_btree.h" -#include "xfs_rtalloc.h" +#include "xfs_rtbitmap.h" #include "xfs_errortag.h" #include "xfs_error.h" #include "xfs_quota.h" @@ -572,8 +572,13 @@ xfs_bmap_btree_to_extents( cblock = XFS_BUF_TO_BLOCK(cbp); if 
((error = xfs_btree_check_block(cur, cblock, 0, cbp))) return error; + xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork); - xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo); + error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo, + XFS_AG_RESV_NONE); + if (error) + return error; + ip->i_nblocks--; xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); xfs_trans_binval(tp, cbp);
@@ -1525,6 +1530,7 @@ xfs_bmap_add_extent_delay_real( if (error) goto done; } + ASSERT(da_new <= da_old); break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
@@ -1554,6 +1560,7 @@ xfs_bmap_add_extent_delay_real( if (error) goto done; } + ASSERT(da_new <= da_old); break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
@@ -1587,6 +1594,7 @@ xfs_bmap_add_extent_delay_real( if (error) goto done; } + ASSERT(da_new <= da_old); break; case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
@@ -1619,6 +1627,7 @@ xfs_bmap_add_extent_delay_real( goto done; } } + ASSERT(da_new <= da_old); break; case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
@@ -1656,6 +1665,7 @@ xfs_bmap_add_extent_delay_real( if (error) goto done; } + ASSERT(da_new <= da_old); break; case BMAP_LEFT_FILLING:
@@ -1743,6 +1753,7 @@ xfs_bmap_add_extent_delay_real( xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV); xfs_iext_next(ifp, &bma->icur); xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT); + ASSERT(da_new <= da_old); break; case BMAP_RIGHT_FILLING:
@@ -1790,6 +1801,7 @@ xfs_bmap_add_extent_delay_real( PREV.br_blockcount = temp; xfs_iext_insert(bma->ip, &bma->icur, &PREV, state); xfs_iext_next(ifp, &bma->icur); + ASSERT(da_new <= da_old); break; case 0:
@@ -1910,11 +1922,9 @@ xfs_bmap_add_extent_delay_real( } /* adjust for changes in reserved delayed indirect blocks */ - if (da_new != da_old) { - ASSERT(state == 0 || da_new < da_old); + if (da_new != da_old) error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), - false); - } + true); xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork); done:
@@ -3949,20 +3959,32 @@ xfs_bmapi_reserve_delalloc( xfs_extlen_t alen; xfs_extlen_t indlen; int error; - xfs_fileoff_t aoff = off; + xfs_fileoff_t aoff; + bool use_cowextszhint = + whichfork == XFS_COW_FORK && !prealloc; +retry: /* * Cap the alloc length. Keep track of prealloc so we know whether to * tag the inode before we return. */ + aoff = off; alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN); if (!eof) alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); if (prealloc && alen >= len) prealloc = alen - len;
- /* Figure out the extent size, adjust alen */ - if (whichfork == XFS_COW_FORK) { + /* + * If we're targeting the COW fork but aren't creating a speculative + * posteof preallocation, try to expand the reservation to align with + * the COW extent size hint if there's sufficient free space. + * + * Unlike the data fork, the CoW cancellation functions will free all + * the reservations at inactivation, so we don't require that every + * delalloc reservation have a dirty pagecache. 
+ */ + if (use_cowextszhint) { struct xfs_bmbt_irec prev; xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
@@ -3981,7 +4003,7 @@ xfs_bmapi_reserve_delalloc( */ error = xfs_quota_reserve_blkres(ip, alen); if (error) - return error; + goto out; /* * Split changing sb for alen and indlen since they could be coming
@@ -4026,6 +4048,17 @@ out_unreserve_blocks: out_unreserve_quota: if (XFS_IS_QUOTA_ON(mp)) xfs_quota_unreserve_blkres(ip, alen); +out: + if (error == -ENOSPC || error == -EDQUOT) { + trace_xfs_delalloc_enospc(ip, off, len); + + if (prealloc || use_cowextszhint) { + /* retry without any preallocation */ + use_cowextszhint = false; + prealloc = 0; + goto retry; + } + } return error; }
@@ -4108,8 +4141,10 @@ xfs_bmapi_allocate( } else { error = xfs_bmap_alloc_userdata(bma); } - if (error || bma->blkno == NULLFSBLOCK) + if (error) return error; + if (bma->blkno == NULLFSBLOCK) + return -ENOSPC; if (bma->flags & XFS_BMAPI_ZERO) { error = xfs_zero_extent(bma->ip, bma->blkno, bma->length);
@@ -4289,6 +4324,15 @@ xfs_bmapi_finish( * extent state if necessary. Details behaviour is controlled by the flags * parameter. Only allocates blocks from a single allocation group, to avoid * locking problems. + * + * Returns 0 on success and places the extent mappings in mval. nmaps is used + * as an input/output parameter where the caller specifies the maximum number + * of mappings that may be returned and xfs_bmapi_write passes back the number + * of mappings (including existing mappings) it found. + * + * Returns a negative error code on failure, including -ENOSPC when it could not + * allocate any blocks and -ENOSR when it did allocate blocks to convert a + * delalloc range, but those blocks were before the passed in range. */ int xfs_bmapi_write(
@@ -4416,10 +4460,16 @@ xfs_bmapi_write( ASSERT(len > 0); ASSERT(bma.length > 0); error = xfs_bmapi_allocate(&bma); - if (error) + if (error) { + /* + * If we already allocated space in a previous + * iteration, return what we got so far when + * running out of space. + */ + if (error == -ENOSPC && bma.nallocs) + break; goto error0; - if (bma.blkno == NULLFSBLOCK) - break; + } /* * If this is a CoW allocation, record the data in
@@ -4457,7 +4507,6 @@ xfs_bmapi_write( if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got)) eof = true; } - *nmap = n; error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags, whichfork);
@@ -4468,7 +4517,22 @@ xfs_bmapi_write( ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork)); xfs_bmapi_finish(&bma, whichfork, 0); xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval, - orig_nmap, *nmap); + orig_nmap, n); + + /* + * When converting delayed allocations, xfs_bmapi_allocate ignores + * the passed in bno and always converts from the start of the found + * delalloc extent. + * + * To avoid a successful return with *nmap set to 0, return the magic + * -ENOSR error code for this particular case so that the caller can + * handle it. + */ + if (!n) { + ASSERT(bma.nallocs >= *nmap); + return -ENOSR; + } + *nmap = n; return 0; error0: xfs_bmapi_finish(&bma, whichfork, error);
@@ -4481,8 +4545,8 @@ error0: * invocations to allocate the target offset if a large enough physical extent * is not available. 
-int -xfs_bmapi_convert_delalloc( +static int +xfs_bmapi_convert_one_delalloc( struct xfs_inode *ip, int whichfork, xfs_off_t offset,
@@ -4539,7 +4603,8 @@ xfs_bmapi_convert_delalloc( if (!isnullstartblock(bma.got.br_startblock)) { xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags, xfs_iomap_inode_sequence(ip, flags)); - *seq = READ_ONCE(ifp->if_seq); + if (seq) + *seq = READ_ONCE(ifp->if_seq); goto out_trans_cancel; }
@@ -4575,9 +4640,6 @@ xfs_bmapi_convert_delalloc( if (error) goto out_finish; - error = -ENOSPC; - if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK)) - goto out_finish; error = -EFSCORRUPTED; if (WARN_ON_ONCE(!xfs_valid_startblock(ip, bma.got.br_startblock))) goto out_finish;
@@ -4588,7 +4650,8 @@ xfs_bmapi_convert_delalloc( ASSERT(!isnullstartblock(bma.got.br_startblock)); xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags, xfs_iomap_inode_sequence(ip, flags)); - *seq = READ_ONCE(ifp->if_seq); + if (seq) + *seq = READ_ONCE(ifp->if_seq); if (whichfork == XFS_COW_FORK) xfs_refcount_alloc_cow_extent(tp, bma.blkno, bma.length);
@@ -4611,6 +4674,36 @@ out_trans_cancel: return error; } +/* + * Pass in a delalloc extent and convert it to real extents, return the real + * extent that maps offset_fsb in iomap. + */ +int +xfs_bmapi_convert_delalloc( + struct xfs_inode *ip, + int whichfork, + loff_t offset, + struct iomap *iomap, + unsigned int *seq) +{ + int error; + + /* + * Attempt to allocate whatever delalloc extent currently backs offset + * and put the result into iomap. Allocate in a loop because it may + * take several attempts to allocate real blocks for a contiguous + * delalloc extent if free space is sufficiently fragmented. + */ + do { + error = xfs_bmapi_convert_one_delalloc(ip, whichfork, offset, + iomap, seq); + if (error) + return error; + } while (iomap->offset + iomap->length <= offset); + + return 0; +} + int xfs_bmapi_remap( struct xfs_trans *tp,
@@ -4807,7 +4900,7 @@ xfs_bmap_del_extent_delay( ASSERT(got_endoff >= del_endoff); if (isrt) { - uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount); + uint64_t rtexts = del->br_blockcount; do_div(rtexts, mp->m_sb.sb_rextsize); xfs_mod_frextents(mp, rtexts);
@@ -4994,7 +5087,6 @@ xfs_bmap_del_extent_real( xfs_fileoff_t del_endoff; /* first offset past del */ int do_fx; /* free extent at end of routine */ int error; /* error return value */ - int flags = 0;/* inode logging flags */ struct xfs_bmbt_irec got; /* current extent entry */ xfs_fileoff_t got_endoff; /* first offset past got */ int i; /* temp state */
@@ -5007,6 +5099,8 @@ xfs_bmap_del_extent_real( uint32_t state = xfs_bmap_fork_to_state(whichfork); struct xfs_bmbt_irec old; + *logflagsp = 0; + mp = ip->i_mount; XFS_STATS_INC(mp, xs_del_exlist);
@@ -5019,7 +5113,6 @@ xfs_bmap_del_extent_real( ASSERT(got_endoff >= del_endoff); ASSERT(!isnullstartblock(got.br_startblock)); qfield = 0; - error = 0; /* * If it's the case where the directory code is running with no block
@@ -5035,45 +5128,30 @@ xfs_bmap_del_extent_real( del->br_startoff > got.br_startoff && del_endoff < got_endoff) return -ENOSPC; - flags = XFS_ILOG_CORE; + *logflagsp = XFS_ILOG_CORE; if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) { - xfs_filblks_t len; - xfs_extlen_t mod; - - len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize, - &mod); - ASSERT(mod == 0); - if (!(bflags & XFS_BMAPI_REMAP)) { - xfs_fsblock_t bno; - - bno = div_u64_rem(del->br_startblock, - mp->m_sb.sb_rextsize, &mod); - ASSERT(mod == 0); - - error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len); + error = 
xfs_rtfree_blocks(tp, del->br_startblock, + del->br_blockcount); if (error) - goto done; + return error; } do_fx = 0; - nblks = len * mp->m_sb.sb_rextsize; qfield = XFS_TRANS_DQ_RTBCOUNT; } else { do_fx = 1; - nblks = del->br_blockcount; qfield = XFS_TRANS_DQ_BCOUNT; } + nblks = del->br_blockcount; del_endblock = del->br_startblock + del->br_blockcount; if (cur) { error = xfs_bmbt_lookup_eq(cur, &got, &i); if (error) - goto done; - if (XFS_IS_CORRUPT(mp, i != 1)) { - error = -EFSCORRUPTED; - goto done; - } + return error; + if (XFS_IS_CORRUPT(mp, i != 1)) + return -EFSCORRUPTED; } if (got.br_startoff == del->br_startoff) @@ -5090,17 +5168,15 @@ xfs_bmap_del_extent_real( xfs_iext_prev(ifp, icur); ifp->if_nextents--; - flags |= XFS_ILOG_CORE; + *logflagsp |= XFS_ILOG_CORE; if (!cur) { - flags |= xfs_ilog_fext(whichfork); + *logflagsp |= xfs_ilog_fext(whichfork); break; } if ((error = xfs_btree_delete(cur, &i))) - goto done; - if (XFS_IS_CORRUPT(mp, i != 1)) { - error = -EFSCORRUPTED; - goto done; - } + return error; + if (XFS_IS_CORRUPT(mp, i != 1)) + return -EFSCORRUPTED; break; case BMAP_LEFT_FILLING: /* @@ -5111,12 +5187,12 @@ xfs_bmap_del_extent_real( got.br_blockcount -= del->br_blockcount; xfs_iext_update_extent(ip, state, icur, &got); if (!cur) { - flags |= xfs_ilog_fext(whichfork); + *logflagsp |= xfs_ilog_fext(whichfork); break; } error = xfs_bmbt_update(cur, &got); if (error) - goto done; + return error; break; case BMAP_RIGHT_FILLING: /* @@ -5125,12 +5201,12 @@ xfs_bmap_del_extent_real( got.br_blockcount -= del->br_blockcount; xfs_iext_update_extent(ip, state, icur, &got); if (!cur) { - flags |= xfs_ilog_fext(whichfork); + *logflagsp |= xfs_ilog_fext(whichfork); break; } error = xfs_bmbt_update(cur, &got); if (error) - goto done; + return error; break; case 0: /* @@ -5147,18 +5223,18 @@ xfs_bmap_del_extent_real( new.br_state = got.br_state; new.br_startblock = del_endblock; - flags |= XFS_ILOG_CORE; + *logflagsp |= XFS_ILOG_CORE; if (cur) { error = xfs_bmbt_update(cur, &got); if (error) - goto done; + return error; error = xfs_btree_increment(cur, 0, &i); if (error) - goto done; + return error; cur->bc_rec.b = new; error = xfs_btree_insert(cur, &i); if (error && error != -ENOSPC) - goto done; + return error; /* * If get no-space back from btree insert, it tried a * split, and we have a zero block reservation. Fix up @@ -5171,33 +5247,28 @@ xfs_bmap_del_extent_real( */ error = xfs_bmbt_lookup_eq(cur, &got, &i); if (error) - goto done; - if (XFS_IS_CORRUPT(mp, i != 1)) { - error = -EFSCORRUPTED; - goto done; - } + return error; + if (XFS_IS_CORRUPT(mp, i != 1)) + return -EFSCORRUPTED; /* * Update the btree record back * to the original value. */ error = xfs_bmbt_update(cur, &old); if (error) - goto done; + return error; /* * Reset the extent record back * to the original value. 
*/ xfs_iext_update_extent(ip, state, icur, &old); - flags = 0; - error = -ENOSPC; - goto done; - } - if (XFS_IS_CORRUPT(mp, i != 1)) { - error = -EFSCORRUPTED; - goto done; + *logflagsp = 0; + return -ENOSPC; } + if (XFS_IS_CORRUPT(mp, i != 1)) + return -EFSCORRUPTED; } else - flags |= xfs_ilog_fext(whichfork); + *logflagsp |= xfs_ilog_fext(whichfork); ifp->if_nextents++; xfs_iext_next(ifp, icur); @@ -5215,10 +5286,13 @@ xfs_bmap_del_extent_real( if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) { xfs_refcount_decrease_extent(tp, del); } else { - __xfs_free_extent_later(tp, del->br_startblock, + error = __xfs_free_extent_later(tp, del->br_startblock, del->br_blockcount, NULL, - (bflags & XFS_BMAPI_NODISCARD) || - del->br_state == XFS_EXT_UNWRITTEN); + XFS_AG_RESV_NONE, + ((bflags & XFS_BMAPI_NODISCARD) || + del->br_state == XFS_EXT_UNWRITTEN)); + if (error) + return error; } } @@ -5233,9 +5307,7 @@ xfs_bmap_del_extent_real( if (qfield && !(bflags & XFS_BMAPI_REMAP)) xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks); -done: - *logflagsp = flags; - return error; + return 0; } /* @@ -6132,39 +6204,37 @@ xfs_bmap_unmap_extent( int xfs_bmap_finish_one( struct xfs_trans *tp, - struct xfs_inode *ip, - enum xfs_bmap_intent_type type, - int whichfork, - xfs_fileoff_t startoff, - xfs_fsblock_t startblock, - xfs_filblks_t *blockcount, - xfs_exntst_t state) + struct xfs_bmap_intent *bi) { + struct xfs_bmbt_irec *bmap = &bi->bi_bmap; int error = 0; ASSERT(tp->t_firstblock == NULLFSBLOCK); trace_xfs_bmap_deferred(tp->t_mountp, - XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type, - XFS_FSB_TO_AGBNO(tp->t_mountp, startblock), - ip->i_ino, whichfork, startoff, *blockcount, state); + XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock), + bi->bi_type, + XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock), + bi->bi_owner->i_ino, bi->bi_whichfork, + bmap->br_startoff, bmap->br_blockcount, + bmap->br_state); - if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK)) + if (WARN_ON_ONCE(bi->bi_whichfork != XFS_DATA_FORK)) return -EFSCORRUPTED; if (XFS_TEST_ERROR(false, tp->t_mountp, XFS_ERRTAG_BMAP_FINISH_ONE)) return -EIO; - switch (type) { + switch (bi->bi_type) { case XFS_BMAP_MAP: - error = xfs_bmapi_remap(tp, ip, startoff, *blockcount, - startblock, 0); - *blockcount = 0; + error = xfs_bmapi_remap(tp, bi->bi_owner, bmap->br_startoff, + bmap->br_blockcount, bmap->br_startblock, 0); + bmap->br_blockcount = 0; break; case XFS_BMAP_UNMAP: - error = __xfs_bunmapi(tp, ip, startoff, blockcount, - XFS_BMAPI_REMAP, 1); + error = __xfs_bunmapi(tp, bi->bi_owner, bmap->br_startoff, + &bmap->br_blockcount, XFS_BMAPI_REMAP, 1); break; default: ASSERT(0); diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index 08c16e4edc0f..524912f276f8 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h @@ -236,10 +236,7 @@ struct xfs_bmap_intent { struct xfs_bmbt_irec bi_bmap; }; -int xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_inode *ip, - enum xfs_bmap_intent_type type, int whichfork, - xfs_fileoff_t startoff, xfs_fsblock_t startblock, - xfs_filblks_t *blockcount, xfs_exntst_t state); +int xfs_bmap_finish_one(struct xfs_trans *tp, struct xfs_bmap_intent *bi); void xfs_bmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip, struct xfs_bmbt_irec *imap); void xfs_bmap_unmap_extent(struct xfs_trans *tp, struct xfs_inode *ip, diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c index 18de4fbfef4e..57f401f2492d 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.c +++ 
b/fs/xfs/libxfs/xfs_bmap_btree.c @@ -285,11 +285,15 @@ xfs_bmbt_free_block( struct xfs_trans *tp = cur->bc_tp; xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp)); struct xfs_owner_info oinfo; + int error; xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork); - xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo); - ip->i_nblocks--; + error = xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo, + XFS_AG_RESV_NONE); + if (error) + return error; + ip->i_nblocks--; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L); return 0; diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c index 6b084b3cac83..f5c3be85b135 100644 --- a/fs/xfs/libxfs/xfs_btree.c +++ b/fs/xfs/libxfs/xfs_btree.c @@ -3430,14 +3430,31 @@ xfs_btree_insrec( xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS); /* - * If we just inserted into a new tree block, we have to - * recalculate nkey here because nkey is out of date. + * Update btree keys to reflect the newly added record or keyptr. + * There are three cases here to be aware of. Normally, all we have to + * do is walk towards the root, updating keys as necessary. * - * Otherwise we're just updating an existing block (having shoved - * some records into the new tree block), so use the regular key - * update mechanism. + * If the caller had us target a full block for the insertion, we dealt + * with that by calling the _make_block_unfull function. If the + * "make unfull" function splits the block, it'll hand us back the key + * and pointer of the new block. We haven't yet added the new block to + * the next level up, so if we decide to add the new record to the new + * block (bp->b_bn != old_bn), we have to update the caller's pointer + * so that the caller adds the new block with the correct key. + * + * However, there is a third possibility-- if the selected block is the + * root block of an inode-rooted btree and cannot be expanded further, + * the "make unfull" function moves the root block contents to a new + * block and updates the root block to point to the new block. In this + * case, no block pointer is passed back because the block has already + * been added to the btree. In this case, we need to use the regular + * key update function, just like the first case. This is critical for + * overlapping btrees, because the high key must be updated to reflect + * the entire tree, not just the subtree accessible through the first + * child of the root (which is now two levels down from the root). */ - if (bp && xfs_buf_daddr(bp) != old_bn) { + if (!xfs_btree_ptr_is_null(cur, &nptr) && + bp && xfs_buf_daddr(bp) != old_bn) { xfs_btree_get_keys(cur, block, lkey); } else if (xfs_btree_needs_key_update(cur, optr)) { error = xfs_btree_update_keys(cur, level); diff --git a/fs/xfs/libxfs/xfs_btree_staging.c b/fs/xfs/libxfs/xfs_btree_staging.c index dd75e208b543..29e3f8ccb185 100644 --- a/fs/xfs/libxfs/xfs_btree_staging.c +++ b/fs/xfs/libxfs/xfs_btree_staging.c @@ -342,9 +342,7 @@ xfs_btree_bload_drop_buf( if (*bpp == NULL) return; - if (!xfs_buf_delwri_queue(*bpp, buffers_list)) - ASSERT(0); - + xfs_buf_delwri_queue_here(*bpp, buffers_list); xfs_buf_relse(*bpp); *bpp = NULL; } diff --git a/fs/xfs/libxfs/xfs_btree_staging.h b/fs/xfs/libxfs/xfs_btree_staging.h index f0d2976050ae..5f638f711246 100644 --- a/fs/xfs/libxfs/xfs_btree_staging.h +++ b/fs/xfs/libxfs/xfs_btree_staging.h @@ -37,12 +37,6 @@ struct xbtree_ifakeroot { /* Number of bytes available for this fork in the inode. 
*/ unsigned int if_fork_size; - - /* Fork format. */ - unsigned int if_format; - - /* Number of records. */ - unsigned int if_extents; }; /* Cursor interactions with fake roots for inode-rooted btrees. */ diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c index e576560b46e9..12e3cca804b7 100644 --- a/fs/xfs/libxfs/xfs_da_btree.c +++ b/fs/xfs/libxfs/xfs_da_btree.c @@ -2158,8 +2158,8 @@ xfs_da_grow_inode_int( struct xfs_inode *dp = args->dp; int w = args->whichfork; xfs_rfsblock_t nblks = dp->i_nblocks; - struct xfs_bmbt_irec map, *mapp; - int nmap, error, got, i, mapi; + struct xfs_bmbt_irec map, *mapp = ↦ + int nmap, error, got, i, mapi = 1; /* * Find a spot in the file space to put the new block. @@ -2175,14 +2175,7 @@ xfs_da_grow_inode_int( error = xfs_bmapi_write(tp, dp, *bno, count, xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG, args->total, &map, &nmap); - if (error) - return error; - - ASSERT(nmap <= 1); - if (nmap == 1) { - mapp = ↦ - mapi = 1; - } else if (nmap == 0 && count > 1) { + if (error == -ENOSPC && count > 1) { xfs_fileoff_t b; int c; @@ -2199,16 +2192,13 @@ xfs_da_grow_inode_int( args->total, &mapp[mapi], &nmap); if (error) goto out_free_map; - if (nmap < 1) - break; mapi += nmap; b = mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount; } - } else { - mapi = 0; - mapp = NULL; } + if (error) + goto out_free_map; /* * Count the blocks we got, make sure it matches the total. @@ -2316,10 +2306,17 @@ xfs_da3_swap_lastblock( return error; /* * Copy the last block into the dead buffer and log it. + * On CRC-enabled file systems, also update the stamped in blkno. */ memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize); + if (xfs_has_crc(mp)) { + struct xfs_da3_blkinfo *da3 = dead_buf->b_addr; + + da3->blkno = cpu_to_be64(xfs_buf_daddr(dead_buf)); + } xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1); dead_info = dead_buf->b_addr; + /* * Get values from the moved block. */ diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c index 5a321b783398..92470ed3fcbd 100644 --- a/fs/xfs/libxfs/xfs_defer.c +++ b/fs/xfs/libxfs/xfs_defer.c @@ -245,26 +245,33 @@ xfs_defer_create_intents( return ret; } +STATIC void +xfs_defer_pending_abort( + struct xfs_mount *mp, + struct list_head *dop_list) +{ + struct xfs_defer_pending *dfp; + const struct xfs_defer_op_type *ops; + + /* Abort intent items that don't have a done item. */ + list_for_each_entry(dfp, dop_list, dfp_list) { + ops = defer_op_types[dfp->dfp_type]; + trace_xfs_defer_pending_abort(mp, dfp); + if (dfp->dfp_intent && !dfp->dfp_done) { + ops->abort_intent(dfp->dfp_intent); + dfp->dfp_intent = NULL; + } + } +} + /* Abort all the intents that were committed. */ STATIC void xfs_defer_trans_abort( struct xfs_trans *tp, struct list_head *dop_pending) { - struct xfs_defer_pending *dfp; - const struct xfs_defer_op_type *ops; - trace_xfs_defer_trans_abort(tp, _RET_IP_); - - /* Abort intent items that don't have a done item. */ - list_for_each_entry(dfp, dop_pending, dfp_list) { - ops = defer_op_types[dfp->dfp_type]; - trace_xfs_defer_pending_abort(tp->t_mountp, dfp); - if (dfp->dfp_intent && !dfp->dfp_done) { - ops->abort_intent(dfp->dfp_intent); - dfp->dfp_intent = NULL; - } - } + xfs_defer_pending_abort(tp->t_mountp, dop_pending); } /* @@ -754,12 +761,13 @@ xfs_defer_ops_capture( /* Release all resources that we used to capture deferred ops. 
*/ void -xfs_defer_ops_capture_free( +xfs_defer_ops_capture_abort( struct xfs_mount *mp, struct xfs_defer_capture *dfc) { unsigned short i; + xfs_defer_pending_abort(mp, &dfc->dfc_dfops); xfs_defer_cancel_list(mp, &dfc->dfc_dfops); for (i = 0; i < dfc->dfc_held.dr_bufs; i++) @@ -800,7 +808,7 @@ xfs_defer_ops_capture_and_commit( /* Commit the transaction and add the capture structure to the list. */ error = xfs_trans_commit(tp); if (error) { - xfs_defer_ops_capture_free(mp, dfc); + xfs_defer_ops_capture_abort(mp, dfc); return error; } diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h index 114a3a4930a3..8788ad5f6a73 100644 --- a/fs/xfs/libxfs/xfs_defer.h +++ b/fs/xfs/libxfs/xfs_defer.h @@ -121,7 +121,7 @@ int xfs_defer_ops_capture_and_commit(struct xfs_trans *tp, struct list_head *capture_list); void xfs_defer_ops_continue(struct xfs_defer_capture *d, struct xfs_trans *tp, struct xfs_defer_resources *dres); -void xfs_defer_ops_capture_free(struct xfs_mount *mp, +void xfs_defer_ops_capture_abort(struct xfs_mount *mp, struct xfs_defer_capture *d); void xfs_defer_resources_rele(struct xfs_defer_resources *dres); diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index 371dc07233e0..20acb8573d7a 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h @@ -98,7 +98,7 @@ typedef struct xfs_sb { uint32_t sb_blocksize; /* logical block size, bytes */ xfs_rfsblock_t sb_dblocks; /* number of data blocks */ xfs_rfsblock_t sb_rblocks; /* number of realtime blocks */ - xfs_rtblock_t sb_rextents; /* number of realtime extents */ + xfs_rtbxlen_t sb_rextents; /* number of realtime extents */ uuid_t sb_uuid; /* user-visible file system unique id */ xfs_fsblock_t sb_logstart; /* starting block of log if internal */ xfs_ino_t sb_rootino; /* root inode number */ diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 120dbec16f5c..d1472cbd48ff 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -1827,7 +1827,7 @@ xfs_dialloc( * might be sparse and only free the regions that are allocated as part of the * chunk. */ -STATIC void +static int xfs_difree_inode_chunk( struct xfs_trans *tp, xfs_agnumber_t agno, @@ -1844,10 +1844,10 @@ xfs_difree_inode_chunk( if (!xfs_inobt_issparse(rec->ir_holemask)) { /* not sparse, calculate extent info directly */ - xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, sagbno), - M_IGEO(mp)->ialloc_blks, - &XFS_RMAP_OINFO_INODES); - return; + return xfs_free_extent_later(tp, + XFS_AGB_TO_FSB(mp, agno, sagbno), + M_IGEO(mp)->ialloc_blks, &XFS_RMAP_OINFO_INODES, + XFS_AG_RESV_NONE); } /* holemask is only 16-bits (fits in an unsigned long) */ @@ -1864,6 +1864,8 @@ xfs_difree_inode_chunk( XFS_INOBT_HOLEMASK_BITS); nextbit = startidx + 1; while (startidx < XFS_INOBT_HOLEMASK_BITS) { + int error; + nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS, nextbit); /* @@ -1889,8 +1891,11 @@ xfs_difree_inode_chunk( ASSERT(agbno % mp->m_sb.sb_spino_align == 0); ASSERT(contigblk % mp->m_sb.sb_spino_align == 0); - xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, agbno), - contigblk, &XFS_RMAP_OINFO_INODES); + error = xfs_free_extent_later(tp, + XFS_AGB_TO_FSB(mp, agno, agbno), contigblk, + &XFS_RMAP_OINFO_INODES, XFS_AG_RESV_NONE); + if (error) + return error; /* reset range to current bit and carry on... 
*/ startidx = endidx = nextbit; @@ -1898,6 +1903,7 @@ xfs_difree_inode_chunk( next: nextbit++; } + return 0; } STATIC int @@ -1998,7 +2004,9 @@ xfs_difree_inobt( goto error0; } - xfs_difree_inode_chunk(tp, pag->pag_agno, &rec); + error = xfs_difree_inode_chunk(tp, pag->pag_agno, &rec); + if (error) + goto error0; } else { xic->deleted = false; diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c index 8c83e265770c..7125447cde1a 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c @@ -156,9 +156,11 @@ __xfs_inobt_free_block( struct xfs_buf *bp, enum xfs_ag_resv_type resv) { + xfs_fsblock_t fsbno; + xfs_inobt_mod_blockcount(cur, -1); - return xfs_free_extent(cur->bc_tp, - XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)), 1, + fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, xfs_buf_daddr(bp)); + return xfs_free_extent_later(cur->bc_tp, fsbno, 1, &XFS_RMAP_OINFO_INOBT, resv); } diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index 758aacd8166b..3c611c8ac158 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -365,17 +365,40 @@ xfs_dinode_verify_fork( /* * For fork types that can contain local data, check that the fork * format matches the size of local data contained within the fork. - * - * For all types, check that when the size says the should be in extent - * or btree format, the inode isn't claiming it is in local format. */ if (whichfork == XFS_DATA_FORK) { - if (S_ISDIR(mode) || S_ISLNK(mode)) { - if (be64_to_cpu(dip->di_size) <= fork_size && + /* + * A directory small enough to fit in the inode must be stored + * in local format. The directory sf <-> extents conversion + * code updates the directory size accordingly. Directories + * being truncated have zero size and are not subject to this + * check. + */ + if (S_ISDIR(mode)) { + if (dip->di_size && + be64_to_cpu(dip->di_size) <= fork_size && fork_format != XFS_DINODE_FMT_LOCAL) return __this_address; } + /* + * A symlink with a target small enough to fit in the inode can + * be stored in extents format if xattrs were added (thus + * converting the data fork from shortform to remote format) + * and then removed. + */ + if (S_ISLNK(mode)) { + if (be64_to_cpu(dip->di_size) <= fork_size && + fork_format != XFS_DINODE_FMT_EXTENTS && + fork_format != XFS_DINODE_FMT_LOCAL) + return __this_address; + } + + /* + * For all types, check that when the size says the fork should + * be in extent or btree format, the inode isn't claiming to be + * in local format. + */ if (be64_to_cpu(dip->di_size) > fork_size && fork_format == XFS_DINODE_FMT_LOCAL) return __this_address; @@ -491,9 +514,19 @@ xfs_dinode_verify( if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN) return __this_address; - /* No zero-length symlinks/dirs. */ - if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0) - return __this_address; + /* + * No zero-length symlinks/dirs unless they're unlinked and hence being + * inactivated. 
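/*
 * Illustrative restatement, not part of the patch: the data-fork format
 * rules that the xfs_dinode_verify_fork() hunk above enforces, written as a
 * standalone predicate.  The enum and parameter names are simplified
 * stand-ins rather than the kernel's own types.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum fork_fmt { FMT_LOCAL, FMT_EXTENTS, FMT_BTREE };

static bool data_fork_fmt_ok(bool is_dir, bool is_symlink,
			     uint64_t di_size, uint64_t fork_size,
			     enum fork_fmt fmt)
{
	/* A non-empty directory that fits in the inode must be local;
	 * a zero-size (being truncated) directory is exempt. */
	if (is_dir && di_size && di_size <= fork_size && fmt != FMT_LOCAL)
		return false;
	/* A short symlink is normally local, but may legitimately be in
	 * extents format if xattrs were added and later removed. */
	if (is_symlink && di_size <= fork_size &&
	    fmt != FMT_LOCAL && fmt != FMT_EXTENTS)
		return false;
	/* Nothing larger than the inode fork may claim local format. */
	if (di_size > fork_size && fmt == FMT_LOCAL)
		return false;
	return true;
}

int main(void)
{
	/* 100-byte symlink target in extents format: now accepted. */
	printf("%d\n", data_fork_fmt_ok(false, true, 100, 300, FMT_EXTENTS));
	/* 100-byte directory in extents format: still rejected. */
	printf("%d\n", data_fork_fmt_ok(true, false, 100, 300, FMT_EXTENTS));
	return 0;
}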
+ */ + if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0) { + if (dip->di_version > 1) { + if (dip->di_nlink) + return __this_address; + } else { + if (dip->di_onlink) + return __this_address; + } + } fa = xfs_dinode_verify_nrext64(mp, dip); if (fa) @@ -507,6 +540,9 @@ xfs_dinode_verify( if (mode && nextents + naextents > nblocks) return __this_address; + if (nextents + naextents == 0 && nblocks != 0) + return __this_address; + if (S_ISDIR(mode) && nextents > mp->m_dir_geo->max_extents) return __this_address; diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h index 2420865f3007..a5100a11faf9 100644 --- a/fs/xfs/libxfs/xfs_log_recover.h +++ b/fs/xfs/libxfs/xfs_log_recover.h @@ -131,4 +131,26 @@ void xlog_check_buf_cancel_table(struct xlog *log); #define xlog_check_buf_cancel_table(log) do { } while (0) #endif +/* + * Transform a regular reservation into one suitable for recovery of a log + * intent item. + * + * Intent recovery only runs a single step of the transaction chain and defers + * the rest to a separate transaction. Therefore, we reduce logcount to 1 here + * to avoid livelocks if the log grant space is nearly exhausted due to the + * recovered intent pinning the tail. Keep the same logflags to avoid tripping + * asserts elsewhere. Struct copies abound below. + */ +static inline struct xfs_trans_res +xlog_recover_resv(const struct xfs_trans_res *r) +{ + struct xfs_trans_res ret = { + .tr_logres = r->tr_logres, + .tr_logcount = 1, + .tr_logflags = r->tr_logflags, + }; + + return ret; +} + #endif /* __XFS_LOG_RECOVER_H__ */ diff --git a/fs/xfs/libxfs/xfs_refcount.c b/fs/xfs/libxfs/xfs_refcount.c index 6f7ed9288fe4..4ec7a81dd3ef 100644 --- a/fs/xfs/libxfs/xfs_refcount.c +++ b/fs/xfs/libxfs/xfs_refcount.c @@ -1129,8 +1129,11 @@ xfs_refcount_adjust_extents( fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno, tmp.rc_startblock); - xfs_free_extent_later(cur->bc_tp, fsbno, - tmp.rc_blockcount, NULL); + error = xfs_free_extent_later(cur->bc_tp, fsbno, + tmp.rc_blockcount, NULL, + XFS_AG_RESV_NONE); + if (error) + goto out_error; } (*agbno) += tmp.rc_blockcount; @@ -1188,8 +1191,11 @@ xfs_refcount_adjust_extents( fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno, ext.rc_startblock); - xfs_free_extent_later(cur->bc_tp, fsbno, - ext.rc_blockcount, NULL); + error = xfs_free_extent_later(cur->bc_tp, fsbno, + ext.rc_blockcount, NULL, + XFS_AG_RESV_NONE); + if (error) + goto out_error; } skip: @@ -1213,37 +1219,33 @@ out_error: STATIC int xfs_refcount_adjust( struct xfs_btree_cur *cur, - xfs_agblock_t agbno, - xfs_extlen_t aglen, - xfs_agblock_t *new_agbno, - xfs_extlen_t *new_aglen, + xfs_agblock_t *agbno, + xfs_extlen_t *aglen, enum xfs_refc_adjust_op adj) { bool shape_changed; int shape_changes = 0; int error; - *new_agbno = agbno; - *new_aglen = aglen; if (adj == XFS_REFCOUNT_ADJUST_INCREASE) - trace_xfs_refcount_increase(cur->bc_mp, cur->bc_ag.pag->pag_agno, - agbno, aglen); + trace_xfs_refcount_increase(cur->bc_mp, + cur->bc_ag.pag->pag_agno, *agbno, *aglen); else - trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_ag.pag->pag_agno, - agbno, aglen); + trace_xfs_refcount_decrease(cur->bc_mp, + cur->bc_ag.pag->pag_agno, *agbno, *aglen); /* * Ensure that no rcextents cross the boundary of the adjustment range. 
*/ error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED, - agbno, &shape_changed); + *agbno, &shape_changed); if (error) goto out_error; if (shape_changed) shape_changes++; error = xfs_refcount_split_extent(cur, XFS_REFC_DOMAIN_SHARED, - agbno + aglen, &shape_changed); + *agbno + *aglen, &shape_changed); if (error) goto out_error; if (shape_changed) @@ -1253,7 +1255,7 @@ xfs_refcount_adjust( * Try to merge with the left or right extents of the range. */ error = xfs_refcount_merge_extents(cur, XFS_REFC_DOMAIN_SHARED, - new_agbno, new_aglen, adj, &shape_changed); + agbno, aglen, adj, &shape_changed); if (error) goto out_error; if (shape_changed) @@ -1262,7 +1264,7 @@ xfs_refcount_adjust( cur->bc_ag.refc.shape_changes++; /* Now that we've taken care of the ends, adjust the middle extents */ - error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen, adj); + error = xfs_refcount_adjust_extents(cur, agbno, aglen, adj); if (error) goto out_error; @@ -1298,21 +1300,20 @@ xfs_refcount_finish_one_cleanup( static inline int xfs_refcount_continue_op( struct xfs_btree_cur *cur, - xfs_fsblock_t startblock, - xfs_agblock_t new_agbno, - xfs_extlen_t new_len, - xfs_fsblock_t *new_fsbno) + struct xfs_refcount_intent *ri, + xfs_agblock_t new_agbno) { struct xfs_mount *mp = cur->bc_mp; struct xfs_perag *pag = cur->bc_ag.pag; - if (XFS_IS_CORRUPT(mp, !xfs_verify_agbext(pag, new_agbno, new_len))) + if (XFS_IS_CORRUPT(mp, !xfs_verify_agbext(pag, new_agbno, + ri->ri_blockcount))) return -EFSCORRUPTED; - *new_fsbno = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno); + ri->ri_startblock = XFS_AGB_TO_FSB(mp, pag->pag_agno, new_agbno); - ASSERT(xfs_verify_fsbext(mp, *new_fsbno, new_len)); - ASSERT(pag->pag_agno == XFS_FSB_TO_AGNO(mp, *new_fsbno)); + ASSERT(xfs_verify_fsbext(mp, ri->ri_startblock, ri->ri_blockcount)); + ASSERT(pag->pag_agno == XFS_FSB_TO_AGNO(mp, ri->ri_startblock)); return 0; } @@ -1327,11 +1328,7 @@ xfs_refcount_continue_op( int xfs_refcount_finish_one( struct xfs_trans *tp, - enum xfs_refcount_intent_type type, - xfs_fsblock_t startblock, - xfs_extlen_t blockcount, - xfs_fsblock_t *new_fsb, - xfs_extlen_t *new_len, + struct xfs_refcount_intent *ri, struct xfs_btree_cur **pcur) { struct xfs_mount *mp = tp->t_mountp; @@ -1339,17 +1336,16 @@ xfs_refcount_finish_one( struct xfs_buf *agbp = NULL; int error = 0; xfs_agblock_t bno; - xfs_agblock_t new_agbno; unsigned long nr_ops = 0; int shape_changes = 0; struct xfs_perag *pag; - pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, startblock)); - bno = XFS_FSB_TO_AGBNO(mp, startblock); + pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, ri->ri_startblock)); + bno = XFS_FSB_TO_AGBNO(mp, ri->ri_startblock); - trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, startblock), - type, XFS_FSB_TO_AGBNO(mp, startblock), - blockcount); + trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, ri->ri_startblock), + ri->ri_type, XFS_FSB_TO_AGBNO(mp, ri->ri_startblock), + ri->ri_blockcount); if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_REFCOUNT_FINISH_ONE)) { error = -EIO; @@ -1380,42 +1376,42 @@ xfs_refcount_finish_one( } *pcur = rcur; - switch (type) { + switch (ri->ri_type) { case XFS_REFCOUNT_INCREASE: - error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno, - new_len, XFS_REFCOUNT_ADJUST_INCREASE); + error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount, + XFS_REFCOUNT_ADJUST_INCREASE); if (error) goto out_drop; - if (*new_len > 0) - error = xfs_refcount_continue_op(rcur, startblock, - new_agbno, *new_len, new_fsb); + if (ri->ri_blockcount > 0) + error = 
xfs_refcount_continue_op(rcur, ri, bno); break; case XFS_REFCOUNT_DECREASE: - error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno, - new_len, XFS_REFCOUNT_ADJUST_DECREASE); + error = xfs_refcount_adjust(rcur, &bno, &ri->ri_blockcount, + XFS_REFCOUNT_ADJUST_DECREASE); if (error) goto out_drop; - if (*new_len > 0) - error = xfs_refcount_continue_op(rcur, startblock, - new_agbno, *new_len, new_fsb); + if (ri->ri_blockcount > 0) + error = xfs_refcount_continue_op(rcur, ri, bno); break; case XFS_REFCOUNT_ALLOC_COW: - *new_fsb = startblock + blockcount; - *new_len = 0; - error = __xfs_refcount_cow_alloc(rcur, bno, blockcount); + error = __xfs_refcount_cow_alloc(rcur, bno, ri->ri_blockcount); + if (error) + goto out_drop; + ri->ri_blockcount = 0; break; case XFS_REFCOUNT_FREE_COW: - *new_fsb = startblock + blockcount; - *new_len = 0; - error = __xfs_refcount_cow_free(rcur, bno, blockcount); + error = __xfs_refcount_cow_free(rcur, bno, ri->ri_blockcount); + if (error) + goto out_drop; + ri->ri_blockcount = 0; break; default: ASSERT(0); error = -EFSCORRUPTED; } - if (!error && *new_len > 0) - trace_xfs_refcount_finish_one_leftover(mp, pag->pag_agno, type, - bno, blockcount, new_agbno, *new_len); + if (!error && ri->ri_blockcount > 0) + trace_xfs_refcount_finish_one_leftover(mp, pag->pag_agno, + ri->ri_type, bno, ri->ri_blockcount); out_drop: xfs_perag_put(pag); return error; @@ -1968,7 +1964,11 @@ xfs_refcount_recover_cow_leftovers( rr->rr_rrec.rc_blockcount); /* Free the block. */ - xfs_free_extent_later(tp, fsb, rr->rr_rrec.rc_blockcount, NULL); + error = xfs_free_extent_later(tp, fsb, + rr->rr_rrec.rc_blockcount, NULL, + XFS_AG_RESV_NONE); + if (error) + goto out_trans; error = xfs_trans_commit(tp); if (error) diff --git a/fs/xfs/libxfs/xfs_refcount.h b/fs/xfs/libxfs/xfs_refcount.h index 452f30556f5a..c633477ce3ce 100644 --- a/fs/xfs/libxfs/xfs_refcount.h +++ b/fs/xfs/libxfs/xfs_refcount.h @@ -75,9 +75,7 @@ void xfs_refcount_decrease_extent(struct xfs_trans *tp, extern void xfs_refcount_finish_one_cleanup(struct xfs_trans *tp, struct xfs_btree_cur *rcur, int error); extern int xfs_refcount_finish_one(struct xfs_trans *tp, - enum xfs_refcount_intent_type type, xfs_fsblock_t startblock, - xfs_extlen_t blockcount, xfs_fsblock_t *new_fsb, - xfs_extlen_t *new_len, struct xfs_btree_cur **pcur); + struct xfs_refcount_intent *ri, struct xfs_btree_cur **pcur); extern int xfs_refcount_find_shared(struct xfs_btree_cur *cur, xfs_agblock_t agbno, xfs_extlen_t aglen, xfs_agblock_t *fbno, diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c index e1f789866683..fbd53b6951a9 100644 --- a/fs/xfs/libxfs/xfs_refcount_btree.c +++ b/fs/xfs/libxfs/xfs_refcount_btree.c @@ -106,18 +106,13 @@ xfs_refcountbt_free_block( struct xfs_buf *agbp = cur->bc_ag.agbp; struct xfs_agf *agf = agbp->b_addr; xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp)); - int error; trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno, XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1); be32_add_cpu(&agf->agf_refcount_blocks, -1); xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS); - error = xfs_free_extent(cur->bc_tp, fsbno, 1, &XFS_RMAP_OINFO_REFC, - XFS_AG_RESV_METADATA); - if (error) - return error; - - return error; + return xfs_free_extent_later(cur->bc_tp, fsbno, 1, + &XFS_RMAP_OINFO_REFC, XFS_AG_RESV_METADATA); } STATIC int diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c index fa180ab66b73..760172a65aff 100644 --- a/fs/xfs/libxfs/xfs_rtbitmap.c 
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c @@ -16,6 +16,7 @@ #include "xfs_trans.h" #include "xfs_rtalloc.h" #include "xfs_error.h" +#include "xfs_rtbitmap.h" /* * Realtime allocator bitmap functions shared with userspace. @@ -1005,6 +1006,39 @@ xfs_rtfree_extent( return 0; } +/* + * Free some blocks in the realtime subvolume. rtbno and rtlen are in units of + * rt blocks, not rt extents; must be aligned to the rt extent size; and rtlen + * cannot exceed XFS_MAX_BMBT_EXTLEN. + */ +int +xfs_rtfree_blocks( + struct xfs_trans *tp, + xfs_fsblock_t rtbno, + xfs_filblks_t rtlen) +{ + struct xfs_mount *mp = tp->t_mountp; + xfs_rtblock_t bno; + xfs_filblks_t len; + xfs_extlen_t mod; + + ASSERT(rtlen <= XFS_MAX_BMBT_EXTLEN); + + len = div_u64_rem(rtlen, mp->m_sb.sb_rextsize, &mod); + if (mod) { + ASSERT(mod == 0); + return -EIO; + } + + bno = div_u64_rem(rtbno, mp->m_sb.sb_rextsize, &mod); + if (mod) { + ASSERT(mod == 0); + return -EIO; + } + + return xfs_rtfree_extent(tp, bno, len); +} + /* Find all the free records within a given range. */ int xfs_rtalloc_query_range( @@ -1096,3 +1130,4 @@ xfs_rtalloc_extent_is_free( *is_free = matches; return 0; } + diff --git a/fs/xfs/libxfs/xfs_rtbitmap.h b/fs/xfs/libxfs/xfs_rtbitmap.h new file mode 100644 index 000000000000..b89712983347 --- /dev/null +++ b/fs/xfs/libxfs/xfs_rtbitmap.h @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc. + * All Rights Reserved. + */ +#ifndef __XFS_RTBITMAP_H__ +#define __XFS_RTBITMAP_H__ + +/* + * XXX: Most of the realtime allocation functions deal in units of realtime + * extents, not realtime blocks. This looks funny when paired with the type + * name and screams for a larger cleanup. + */ +struct xfs_rtalloc_rec { + xfs_rtblock_t ar_startext; + xfs_rtbxlen_t ar_extcount; +}; + +typedef int (*xfs_rtalloc_query_range_fn)( + struct xfs_mount *mp, + struct xfs_trans *tp, + const struct xfs_rtalloc_rec *rec, + void *priv); + +#ifdef CONFIG_XFS_RT +int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp, + xfs_rtblock_t block, int issum, struct xfs_buf **bpp); +int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp, + xfs_rtblock_t start, xfs_extlen_t len, int val, + xfs_rtblock_t *new, int *stat); +int xfs_rtfind_back(struct xfs_mount *mp, struct xfs_trans *tp, + xfs_rtblock_t start, xfs_rtblock_t limit, + xfs_rtblock_t *rtblock); +int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp, + xfs_rtblock_t start, xfs_rtblock_t limit, + xfs_rtblock_t *rtblock); +int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp, + xfs_rtblock_t start, xfs_extlen_t len, int val); +int xfs_rtmodify_summary_int(struct xfs_mount *mp, struct xfs_trans *tp, + int log, xfs_rtblock_t bbno, int delta, + struct xfs_buf **rbpp, xfs_fsblock_t *rsb, + xfs_suminfo_t *sum); +int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log, + xfs_rtblock_t bbno, int delta, struct xfs_buf **rbpp, + xfs_fsblock_t *rsb); +int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp, + xfs_rtblock_t start, xfs_extlen_t len, + struct xfs_buf **rbpp, xfs_fsblock_t *rsb); +int xfs_rtalloc_query_range(struct xfs_mount *mp, struct xfs_trans *tp, + const struct xfs_rtalloc_rec *low_rec, + const struct xfs_rtalloc_rec *high_rec, + xfs_rtalloc_query_range_fn fn, void *priv); +int xfs_rtalloc_query_all(struct xfs_mount *mp, struct xfs_trans *tp, + xfs_rtalloc_query_range_fn fn, + void *priv); +bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno); +int 
xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp, + xfs_rtblock_t start, xfs_extlen_t len, + bool *is_free); +/* + * Free an extent in the realtime subvolume. Length is expressed in + * realtime extents, as is the block number. + */ +int /* error */ +xfs_rtfree_extent( + struct xfs_trans *tp, /* transaction pointer */ + xfs_rtblock_t bno, /* starting block number to free */ + xfs_extlen_t len); /* length of extent freed */ + +/* Same as above, but in units of rt blocks. */ +int xfs_rtfree_blocks(struct xfs_trans *tp, xfs_fsblock_t rtbno, + xfs_filblks_t rtlen); + +#else /* CONFIG_XFS_RT */ +# define xfs_rtfree_extent(t,b,l) (-ENOSYS) +# define xfs_rtfree_blocks(t,rb,rl) (-ENOSYS) +# define xfs_rtalloc_query_range(m,t,l,h,f,p) (-ENOSYS) +# define xfs_rtalloc_query_all(m,t,f,p) (-ENOSYS) +# define xfs_rtbuf_get(m,t,b,i,p) (-ENOSYS) +# define xfs_rtalloc_extent_is_free(m,t,s,l,i) (-ENOSYS) +#endif /* CONFIG_XFS_RT */ + +#endif /* __XFS_RTBITMAP_H__ */ diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index c24a38272cb7..8e0a176b8e0b 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -25,6 +25,7 @@ #include "xfs_da_format.h" #include "xfs_health.h" #include "xfs_ag.h" +#include "xfs_rtbitmap.h" /* * Physical superblock buffer manipulations. Shared with libxfs in userspace. @@ -259,13 +260,6 @@ xfs_validate_sb_write( * the kernel cannot support since we checked for unsupported bits in * the read verifier, which means that memory is corrupt. */ - if (xfs_sb_has_compat_feature(sbp, XFS_SB_FEAT_COMPAT_UNKNOWN)) { - xfs_warn(mp, -"Corruption detected in superblock compatible features (0x%x)!", - (sbp->sb_features_compat & XFS_SB_FEAT_COMPAT_UNKNOWN)); - return -EFSCORRUPTED; - } - if (!xfs_is_readonly(mp) && xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) { xfs_alert(mp, @@ -508,8 +502,9 @@ xfs_validate_sb_common( rbmblocks = howmany_64(sbp->sb_rextents, NBBY * sbp->sb_blocksize); - if (sbp->sb_rextents != rexts || - sbp->sb_rextslog != xfs_highbit32(sbp->sb_rextents) || + if (!xfs_validate_rtextents(rexts) || + sbp->sb_rextents != rexts || + sbp->sb_rextslog != xfs_compute_rextslog(rexts) || sbp->sb_rbmblocks != rbmblocks) { xfs_notice(mp, "realtime geometry sanity check failed"); @@ -1027,11 +1022,12 @@ xfs_log_sb( * and hence we don't need have to update it here. */ if (xfs_has_lazysbcount(mp)) { - mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount); + mp->m_sb.sb_icount = percpu_counter_sum_positive(&mp->m_icount); mp->m_sb.sb_ifree = min_t(uint64_t, - percpu_counter_sum(&mp->m_ifree), + percpu_counter_sum_positive(&mp->m_ifree), mp->m_sb.sb_icount); - mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks); + mp->m_sb.sb_fdblocks = + percpu_counter_sum_positive(&mp->m_fdblocks); } xfs_sb_to_disk(bp->b_addr, &mp->m_sb); @@ -1372,3 +1368,17 @@ xfs_validate_stripe_geometry( } return true; } + +/* + * Compute the maximum level number of the realtime summary file, as defined by + * mkfs. The historic use of highbit32 on a 64-bit quantity prohibited correct + * use of rt volumes with more than 2^32 extents. 
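/*
 * Illustrative userspace sketch, not part of the patch: why sb_rextslog has
 * to be computed from the full 64-bit extent count, as the new
 * xfs_compute_rextslog() does.  highbit64() below is a local helper, not
 * the kernel's xfs_highbit64().
 */
#include <stdint.h>
#include <stdio.h>

/* floor(log2(v)) for v != 0 */
static int highbit64(uint64_t v)
{
	int bit = -1;

	while (v) {
		v >>= 1;
		bit++;
	}
	return bit;
}

int main(void)
{
	uint64_t rextents = 6ULL << 32;		/* an rt volume with more than 2^32 extents */

	/* What the new helper returns: 34. */
	printf("64-bit: %d\n", highbit64(rextents));
	/* The old 32-bit math only ever saw the low 32 bits, which are zero
	 * here, so the computed rextslog was nonsense (-1 from this helper). */
	printf("32-bit: %d\n", highbit64((uint32_t)rextents));
	return 0;
}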
+ */ +uint8_t +xfs_compute_rextslog( + xfs_rtbxlen_t rtextents) +{ + if (!rtextents) + return 0; + return xfs_highbit64(rtextents); +} diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h index a5e14740ec9a..2e8e8d63d4eb 100644 --- a/fs/xfs/libxfs/xfs_sb.h +++ b/fs/xfs/libxfs/xfs_sb.h @@ -25,7 +25,7 @@ extern uint64_t xfs_sb_version_to_features(struct xfs_sb *sbp); extern int xfs_update_secondary_sbs(struct xfs_mount *mp); -#define XFS_FS_GEOM_MAX_STRUCT_VER (4) +#define XFS_FS_GEOM_MAX_STRUCT_VER (5) extern void xfs_fs_geometry(struct xfs_mount *mp, struct xfs_fsop_geom *geo, int struct_version); extern int xfs_sb_read_secondary(struct xfs_mount *mp, @@ -38,4 +38,6 @@ extern int xfs_sb_get_secondary(struct xfs_mount *mp, extern bool xfs_validate_stripe_geometry(struct xfs_mount *mp, __s64 sunit, __s64 swidth, int sectorsize, bool silent); +uint8_t xfs_compute_rextslog(xfs_rtbxlen_t rtextents); + #endif /* __XFS_SB_H__ */ diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c index bdc777b9ec4a..8828d854e34f 100644 --- a/fs/xfs/libxfs/xfs_symlink_remote.c +++ b/fs/xfs/libxfs/xfs_symlink_remote.c @@ -89,8 +89,10 @@ xfs_symlink_verify( struct xfs_mount *mp = bp->b_mount; struct xfs_dsymlink_hdr *dsl = bp->b_addr; + /* no verification of non-crc buffers */ if (!xfs_has_crc(mp)) - return __this_address; + return NULL; + if (!xfs_verify_magic(bp, dsl->sl_magic)) return __this_address; if (!uuid_equal(&dsl->sl_uuid, &mp->m_sb.sb_meta_uuid)) diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h index 5ebdda7e1078..42fed04f038d 100644 --- a/fs/xfs/libxfs/xfs_types.h +++ b/fs/xfs/libxfs/xfs_types.h @@ -31,6 +31,7 @@ typedef uint64_t xfs_rfsblock_t; /* blockno in filesystem (raw) */ typedef uint64_t xfs_rtblock_t; /* extent (block) in realtime area */ typedef uint64_t xfs_fileoff_t; /* block number in a file */ typedef uint64_t xfs_filblks_t; /* number of blocks in a file */ +typedef uint64_t xfs_rtbxlen_t; /* rtbitmap extent length in rtextents */ typedef int64_t xfs_srtblock_t; /* signed version of xfs_rtblock_t */ @@ -227,4 +228,16 @@ bool xfs_verify_fileoff(struct xfs_mount *mp, xfs_fileoff_t off); bool xfs_verify_fileext(struct xfs_mount *mp, xfs_fileoff_t off, xfs_fileoff_t len); +/* Do we support an rt volume having this number of rtextents? */ +static inline bool +xfs_validate_rtextents( + xfs_rtbxlen_t rtextents) +{ + /* No runt rt volumes */ + if (rtextents == 0) + return false; + + return true; +} + #endif /* __XFS_TYPES_H__ */ diff --git a/fs/xfs/scrub/attr.c b/fs/xfs/scrub/attr.c index b6f0c9f3f124..f51771e5c3fe 100644 --- a/fs/xfs/scrub/attr.c +++ b/fs/xfs/scrub/attr.c @@ -159,6 +159,11 @@ xchk_xattr_listent( args.value = xchk_xattr_valuebuf(sx->sc); args.valuelen = valuelen; + /* + * Get the attr value to ensure that lookup can find this attribute + * through the dabtree indexing and that remote value retrieval also + * works correctly. 
+ */ error = xfs_attr_get_ilocked(&args); /* ENODATA means the hash lookup failed and the attr is bad */ if (error == -ENODATA) diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index c18bd039fce9..e0ed0ebfdaea 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -582,7 +582,8 @@ xrep_reap_block( else if (resv == XFS_AG_RESV_AGFL) error = xrep_put_freelist(sc, agbno); else - error = xfs_free_extent(sc->tp, fsbno, 1, oinfo, resv); + error = xfs_free_extent(sc->tp, sc->sa.pag, agbno, 1, oinfo, + resv); if (agf_bp != sc->sa.agf_bp) xfs_trans_brelse(sc->tp, agf_bp); if (error) diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c index 0a3bde64c675..fad7c353ada6 100644 --- a/fs/xfs/scrub/rtbitmap.c +++ b/fs/xfs/scrub/rtbitmap.c @@ -11,9 +11,10 @@ #include "xfs_mount.h" #include "xfs_log_format.h" #include "xfs_trans.h" -#include "xfs_rtalloc.h" +#include "xfs_rtbitmap.h" #include "xfs_inode.h" #include "xfs_bmap.h" +#include "xfs_sb.h" #include "scrub/scrub.h" #include "scrub/common.h" diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h index 93ece6df02e3..0afc978e9983 100644 --- a/fs/xfs/scrub/trace.h +++ b/fs/xfs/scrub/trace.h @@ -464,7 +464,7 @@ TRACE_EVENT(xchk_ifork_btree_error, TP_fast_assign( xfs_fsblock_t fsbno = xchk_btree_cur_fsbno(cur, level); __entry->dev = sc->mp->m_super->s_dev; - __entry->ino = sc->ip->i_ino; + __entry->ino = cur->bc_ino.ip->i_ino; __entry->whichfork = cur->bc_ino.whichfork; __entry->type = sc->sm->sm_type; __entry->btnum = cur->bc_btnum; diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 21c241e96d48..50a7f2745514 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -225,45 +225,6 @@ xfs_imap_valid( return true; } -/* - * Pass in a dellalloc extent and convert it to real extents, return the real - * extent that maps offset_fsb in wpc->iomap. - * - * The current page is held locked so nothing could have removed the block - * backing offset_fsb, although it could have moved from the COW to the data - * fork by another thread. - */ -static int -xfs_convert_blocks( - struct iomap_writepage_ctx *wpc, - struct xfs_inode *ip, - int whichfork, - loff_t offset) -{ - int error; - unsigned *seq; - - if (whichfork == XFS_COW_FORK) - seq = &XFS_WPC(wpc)->cow_seq; - else - seq = &XFS_WPC(wpc)->data_seq; - - /* - * Attempt to allocate whatever delalloc extent currently backs offset - * and put the result into wpc->iomap. Allocate in a loop because it - * may take several attempts to allocate real blocks for a contiguous - * delalloc extent if free space is sufficiently fragmented. - */ - do { - error = xfs_bmapi_convert_delalloc(ip, whichfork, offset, - &wpc->iomap, seq); - if (error) - return error; - } while (wpc->iomap.offset + wpc->iomap.length <= offset); - - return 0; -} - static int xfs_map_blocks( struct iomap_writepage_ctx *wpc, @@ -281,6 +242,7 @@ xfs_map_blocks( struct xfs_iext_cursor icur; int retries = 0; int error = 0; + unsigned int *seq; if (xfs_is_shutdown(mp)) return -EIO; @@ -376,7 +338,19 @@ retry: trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap); return 0; allocate_blocks: - error = xfs_convert_blocks(wpc, ip, whichfork, offset); + /* + * Convert a dellalloc extent to a real one. The current page is held + * locked so nothing could have removed the block backing offset_fsb, + * although it could have moved from the COW to the data fork by another + * thread. 
+ */ + if (whichfork == XFS_COW_FORK) + seq = &XFS_WPC(wpc)->cow_seq; + else + seq = &XFS_WPC(wpc)->data_seq; + + error = xfs_bmapi_convert_delalloc(ip, whichfork, offset, + &wpc->iomap, seq); if (error) { /* * If we failed to find the extent in the COW fork we might have diff --git a/fs/xfs/xfs_attr_item.c b/fs/xfs/xfs_attr_item.c index 2788a6f2edcd..4a712f1565c1 100644 --- a/fs/xfs/xfs_attr_item.c +++ b/fs/xfs/xfs_attr_item.c @@ -329,6 +329,13 @@ xfs_xattri_finish_update( goto out; } + /* If an attr removal is trivially complete, we're done. */ + if (attr->xattri_op_flags == XFS_ATTRI_OP_FLAGS_REMOVE && + !xfs_inode_hasattr(args->dp)) { + error = 0; + goto out; + } + error = xfs_attr_set_iter(attr); if (!error && attr->xattri_dela_state != XFS_DAS_DONE) error = -EAGAIN; @@ -503,6 +510,9 @@ xfs_attri_validate( unsigned int op = attrp->alfi_op_flags & XFS_ATTRI_OP_FLAGS_TYPE_MASK; + if (!xfs_sb_version_haslogxattrs(&mp->m_sb)) + return false; + if (attrp->__pad != 0) return false; @@ -547,7 +557,7 @@ xfs_attri_item_recover( struct xfs_inode *ip; struct xfs_da_args *args; struct xfs_trans *tp; - struct xfs_trans_res tres; + struct xfs_trans_res resv; struct xfs_attri_log_format *attrp; struct xfs_attri_log_nameval *nv = attrip->attri_nameval; int error; @@ -594,8 +604,6 @@ xfs_attri_item_recover( args->op_flags = XFS_DA_OP_RECOVERY | XFS_DA_OP_OKNOENT | XFS_DA_OP_LOGGED; - ASSERT(xfs_sb_version_haslogxattrs(&mp->m_sb)); - switch (attr->xattri_op_flags) { case XFS_ATTRI_OP_FLAGS_SET: case XFS_ATTRI_OP_FLAGS_REPLACE: @@ -608,8 +616,6 @@ xfs_attri_item_recover( attr->xattri_dela_state = xfs_attr_init_add_state(args); break; case XFS_ATTRI_OP_FLAGS_REMOVE: - if (!xfs_inode_hasattr(args->dp)) - goto out; attr->xattri_dela_state = xfs_attr_init_remove_state(args); break; default: @@ -618,8 +624,9 @@ xfs_attri_item_recover( goto out; } - xfs_init_attr_trans(args, &tres, &total); - error = xfs_trans_alloc(mp, &tres, total, 0, XFS_TRANS_RESERVE, &tp); + xfs_init_attr_trans(args, &resv, &total); + resv = xlog_recover_resv(&resv); + error = xfs_trans_alloc(mp, &resv, total, 0, XFS_TRANS_RESERVE, &tp); if (error) goto out; @@ -710,48 +717,111 @@ xlog_recover_attri_commit_pass2( const void *attr_value = NULL; const void *attr_name; size_t len; - - attri_formatp = item->ri_buf[0].i_addr; - attr_name = item->ri_buf[1].i_addr; + unsigned int op, i = 0; /* Validate xfs_attri_log_format before the large memory allocation */ len = sizeof(struct xfs_attri_log_format); - if (item->ri_buf[0].i_len != len) { + if (item->ri_buf[i].i_len != len) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, item->ri_buf[0].i_addr, item->ri_buf[0].i_len); return -EFSCORRUPTED; } + attri_formatp = item->ri_buf[i].i_addr; if (!xfs_attri_validate(mp, attri_formatp)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + attri_formatp, len); return -EFSCORRUPTED; } + /* Check the number of log iovecs makes sense for the op code. 
*/ + op = attri_formatp->alfi_op_flags & XFS_ATTRI_OP_FLAGS_TYPE_MASK; + switch (op) { + case XFS_ATTRI_OP_FLAGS_SET: + case XFS_ATTRI_OP_FLAGS_REPLACE: + /* Log item, attr name, attr value */ + if (item->ri_total != 3) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, + attri_formatp, len); + return -EFSCORRUPTED; + } + break; + case XFS_ATTRI_OP_FLAGS_REMOVE: + /* Log item, attr name */ + if (item->ri_total != 2) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, + attri_formatp, len); + return -EFSCORRUPTED; + } + break; + default: + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, + attri_formatp, len); + return -EFSCORRUPTED; + } + i++; + /* Validate the attr name */ - if (item->ri_buf[1].i_len != + if (item->ri_buf[i].i_len != xlog_calc_iovec_len(attri_formatp->alfi_name_len)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[0].i_addr, item->ri_buf[0].i_len); + attri_formatp, len); return -EFSCORRUPTED; } + attr_name = item->ri_buf[i].i_addr; if (!xfs_attr_namecheck(attr_name, attri_formatp->alfi_name_len)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, - item->ri_buf[1].i_addr, item->ri_buf[1].i_len); + attri_formatp, len); return -EFSCORRUPTED; } + i++; /* Validate the attr value, if present */ if (attri_formatp->alfi_value_len != 0) { - if (item->ri_buf[2].i_len != xlog_calc_iovec_len(attri_formatp->alfi_value_len)) { + if (item->ri_buf[i].i_len != xlog_calc_iovec_len(attri_formatp->alfi_value_len)) { XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, item->ri_buf[0].i_addr, item->ri_buf[0].i_len); return -EFSCORRUPTED; } - attr_value = item->ri_buf[2].i_addr; + attr_value = item->ri_buf[i].i_addr; + i++; + } + + /* + * Make sure we got the correct number of buffers for the operation + * that we just loaded. + */ + if (i != item->ri_total) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, + attri_formatp, len); + return -EFSCORRUPTED; + } + + switch (op) { + case XFS_ATTRI_OP_FLAGS_REMOVE: + /* Regular remove operations operate only on names. */ + if (attr_value != NULL || attri_formatp->alfi_value_len != 0) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, + attri_formatp, len); + return -EFSCORRUPTED; + } + fallthrough; + case XFS_ATTRI_OP_FLAGS_SET: + case XFS_ATTRI_OP_FLAGS_REPLACE: + /* + * Regular xattr set/remove/replace operations require a name + * and do not take a newname. Values are optional for set and + * replace. + */ + if (attr_name == NULL || attri_formatp->alfi_name_len == 0) { + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, + attri_formatp, len); + return -EFSCORRUPTED; + } + break; } /* diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c index 41323da523d1..1058603db3ac 100644 --- a/fs/xfs/xfs_bmap_item.c +++ b/fs/xfs/xfs_bmap_item.c @@ -246,18 +246,11 @@ static int xfs_trans_log_finish_bmap_update( struct xfs_trans *tp, struct xfs_bud_log_item *budp, - enum xfs_bmap_intent_type type, - struct xfs_inode *ip, - int whichfork, - xfs_fileoff_t startoff, - xfs_fsblock_t startblock, - xfs_filblks_t *blockcount, - xfs_exntst_t state) + struct xfs_bmap_intent *bi) { int error; - error = xfs_bmap_finish_one(tp, ip, type, whichfork, startoff, - startblock, blockcount, state); + error = xfs_bmap_finish_one(tp, bi); /* * Mark the transaction dirty, even on error. 
This ensures the @@ -378,25 +371,17 @@ xfs_bmap_update_finish_item( struct list_head *item, struct xfs_btree_cur **state) { - struct xfs_bmap_intent *bmap; - xfs_filblks_t count; + struct xfs_bmap_intent *bi; int error; - bmap = container_of(item, struct xfs_bmap_intent, bi_list); - count = bmap->bi_bmap.br_blockcount; - error = xfs_trans_log_finish_bmap_update(tp, BUD_ITEM(done), - bmap->bi_type, - bmap->bi_owner, bmap->bi_whichfork, - bmap->bi_bmap.br_startoff, - bmap->bi_bmap.br_startblock, - &count, - bmap->bi_bmap.br_state); - if (!error && count > 0) { - ASSERT(bmap->bi_type == XFS_BMAP_UNMAP); - bmap->bi_bmap.br_blockcount = count; + bi = container_of(item, struct xfs_bmap_intent, bi_list); + + error = xfs_trans_log_finish_bmap_update(tp, BUD_ITEM(done), bi); + if (!error && bi->bi_bmap.br_blockcount > 0) { + ASSERT(bi->bi_type == XFS_BMAP_UNMAP); return -EAGAIN; } - kmem_cache_free(xfs_bmap_intent_cache, bmap); + kmem_cache_free(xfs_bmap_intent_cache, bi); return error; } @@ -471,17 +456,14 @@ xfs_bui_item_recover( struct xfs_log_item *lip, struct list_head *capture_list) { - struct xfs_bmbt_irec irec; + struct xfs_bmap_intent fake = { }; + struct xfs_trans_res resv; struct xfs_bui_log_item *buip = BUI_ITEM(lip); struct xfs_trans *tp; struct xfs_inode *ip = NULL; struct xfs_mount *mp = lip->li_log->l_mp; - struct xfs_map_extent *bmap; + struct xfs_map_extent *map; struct xfs_bud_log_item *budp; - xfs_filblks_t count; - xfs_exntst_t state; - unsigned int bui_type; - int whichfork; int iext_delta; int error = 0; @@ -491,19 +473,18 @@ xfs_bui_item_recover( return -EFSCORRUPTED; } - bmap = &buip->bui_format.bui_extents[0]; - state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ? - XFS_EXT_UNWRITTEN : XFS_EXT_NORM; - whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ? + map = &buip->bui_format.bui_extents[0]; + fake.bi_whichfork = (map->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ? XFS_ATTR_FORK : XFS_DATA_FORK; - bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK; + fake.bi_type = map->me_flags & XFS_BMAP_EXTENT_TYPE_MASK; - error = xlog_recover_iget(mp, bmap->me_owner, &ip); + error = xlog_recover_iget(mp, map->me_owner, &ip); if (error) return error; /* Allocate transaction and do the work. */ - error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, + resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate); + error = xfs_trans_alloc(mp, &resv, XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp); if (error) goto err_rele; @@ -512,34 +493,34 @@ xfs_bui_item_recover( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); - if (bui_type == XFS_BMAP_MAP) + if (fake.bi_type == XFS_BMAP_MAP) iext_delta = XFS_IEXT_ADD_NOSPLIT_CNT; else iext_delta = XFS_IEXT_PUNCH_HOLE_CNT; - error = xfs_iext_count_may_overflow(ip, whichfork, iext_delta); + error = xfs_iext_count_may_overflow(ip, fake.bi_whichfork, iext_delta); if (error == -EFBIG) error = xfs_iext_count_upgrade(tp, ip, iext_delta); if (error) goto err_cancel; - count = bmap->me_len; - error = xfs_trans_log_finish_bmap_update(tp, budp, bui_type, ip, - whichfork, bmap->me_startoff, bmap->me_startblock, - &count, state); + fake.bi_owner = ip; + fake.bi_bmap.br_startblock = map->me_startblock; + fake.bi_bmap.br_startoff = map->me_startoff; + fake.bi_bmap.br_blockcount = map->me_len; + fake.bi_bmap.br_state = (map->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ? 
+ XFS_EXT_UNWRITTEN : XFS_EXT_NORM; + + error = xfs_trans_log_finish_bmap_update(tp, budp, &fake); if (error == -EFSCORRUPTED) - XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bmap, - sizeof(*bmap)); + XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, map, + sizeof(*map)); if (error) goto err_cancel; - if (count > 0) { - ASSERT(bui_type == XFS_BMAP_UNMAP); - irec.br_startblock = bmap->me_startblock; - irec.br_blockcount = count; - irec.br_startoff = bmap->me_startoff; - irec.br_state = state; - xfs_bmap_unmap_extent(tp, ip, &irec); + if (fake.bi_bmap.br_blockcount > 0) { + ASSERT(fake.bi_type == XFS_BMAP_UNMAP); + xfs_bmap_unmap_extent(tp, ip, &fake.bi_bmap); } /* diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index ce8e17ab5434..62b92e92a685 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -636,13 +636,11 @@ out_unlock: /* * Test whether it is appropriate to check an inode for and free post EOF - * blocks. The 'force' parameter determines whether we should also consider - * regular files that are marked preallocated or append-only. + * blocks. */ bool xfs_can_free_eofblocks( - struct xfs_inode *ip, - bool force) + struct xfs_inode *ip) { struct xfs_bmbt_irec imap; struct xfs_mount *mp = ip->i_mount; @@ -676,11 +674,11 @@ xfs_can_free_eofblocks( return false; /* - * Do not free real preallocated or append-only files unless the file - * has delalloc blocks and we are forced to remove them. + * Only free real extents for inodes with persistent preallocations or + * the append-only flag. */ if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) - if (!force || ip->i_delayed_blks == 0) + if (ip->i_delayed_blks == 0) return false; /* @@ -734,6 +732,22 @@ xfs_free_eofblocks( /* Wait on dio to ensure i_size has settled. */ inode_dio_wait(VFS_I(ip)); + /* + * For preallocated files only free delayed allocations. + * + * Note that this means we also leave speculative preallocations in + * place for preallocated files. + */ + if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) { + if (ip->i_delayed_blks) { + xfs_bmap_punch_delalloc_range(ip, + round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize), + LLONG_MAX); + } + xfs_inode_clear_eofblocks_tag(ip); + return 0; + } + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); if (error) { ASSERT(xfs_is_shutdown(mp)); @@ -780,12 +794,10 @@ xfs_alloc_file_space( { xfs_mount_t *mp = ip->i_mount; xfs_off_t count; - xfs_filblks_t allocated_fsb; xfs_filblks_t allocatesize_fsb; xfs_extlen_t extsz, temp; xfs_fileoff_t startoffset_fsb; xfs_fileoff_t endoffset_fsb; - int nimaps; int rt; xfs_trans_t *tp; xfs_bmbt_irec_t imaps[1], *imapp; @@ -808,7 +820,6 @@ xfs_alloc_file_space( count = len; imapp = &imaps[0]; - nimaps = 1; startoffset_fsb = XFS_B_TO_FSBT(mp, offset); endoffset_fsb = XFS_B_TO_FSB(mp, offset + count); allocatesize_fsb = endoffset_fsb - startoffset_fsb; @@ -819,6 +830,7 @@ xfs_alloc_file_space( while (allocatesize_fsb && !error) { xfs_fileoff_t s, e; unsigned int dblocks, rblocks, resblks; + int nimaps = 1; /* * Determine space reservations for data/realtime. @@ -870,29 +882,32 @@ xfs_alloc_file_space( if (error) goto error; + /* + * If the allocator cannot find a single free extent large + * enough to cover the start block of the requested range, + * xfs_bmapi_write will return -ENOSR. + * + * In that case we simply need to keep looping with the same + * startoffset_fsb so that one of the following allocations + * will eventually reach the requested range. 
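/*
 * Illustrative standalone sketch, not part of the patch: the caller loop
 * described in the comment above.  allocate_once() is a toy stand-in for
 * the transaction setup plus the xfs_bmapi_write() call; -ENOSR here means
 * "something was allocated, but not yet at the requested offset", so the
 * loop retries the same offset instead of giving up.
 */
#include <errno.h>
#include <stdio.h>

/* Toy allocator: hands out at most 4 "blocks" per call and reports -ENOSR
 * exactly once, the way the real call does when the blocks it converted all
 * sit before the requested offset. */
static int allocate_once(unsigned long long off, unsigned long long len,
			 unsigned long long *got)
{
	static int enosr_once = 1;

	(void)off;
	if (enosr_once) {
		enosr_once = 0;
		return -ENOSR;
	}
	*got = len < 4 ? len : 4;
	return 0;
}

static int allocate_range(unsigned long long off, unsigned long long len)
{
	while (len > 0) {
		unsigned long long got = 0;
		int error = allocate_once(off, len, &got);

		if (error == -ENOSR)
			continue;	/* retry the same offset */
		if (error)
			return error;	/* e.g. a real -ENOSPC: give up */
		off += got;
		len -= got;
	}
	return 0;
}

int main(void)
{
	printf("allocate_range: %d\n", allocate_range(0, 10));
	return 0;
}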
+ */ error = xfs_bmapi_write(tp, ip, startoffset_fsb, allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp, &nimaps); - if (error) - goto error; + if (error) { + if (error != -ENOSR) + goto error; + error = 0; + } else { + startoffset_fsb += imapp->br_blockcount; + allocatesize_fsb -= imapp->br_blockcount; + } ip->i_diflags |= XFS_DIFLAG_PREALLOC; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); error = xfs_trans_commit(tp); xfs_iunlock(ip, XFS_ILOCK_EXCL); - if (error) - break; - - allocated_fsb = imapp->br_blockcount; - - if (nimaps == 0) { - error = -ENOSPC; - break; - } - - startoffset_fsb += allocated_fsb; - allocatesize_fsb -= allocated_fsb; } return error; @@ -1047,7 +1062,7 @@ xfs_prepare_shift( * Trim eofblocks to avoid shifting uninitialized post-eof preallocation * into the accessible region of the file. */ - if (xfs_can_free_eofblocks(ip, true)) { + if (xfs_can_free_eofblocks(ip)) { error = xfs_free_eofblocks(ip); if (error) return error; diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h index 6888078f5c31..1383019ccdb7 100644 --- a/fs/xfs/xfs_bmap_util.h +++ b/fs/xfs/xfs_bmap_util.h @@ -63,7 +63,7 @@ int xfs_insert_file_space(struct xfs_inode *, xfs_off_t offset, xfs_off_t len); /* EOF block manipulation functions */ -bool xfs_can_free_eofblocks(struct xfs_inode *ip, bool force); +bool xfs_can_free_eofblocks(struct xfs_inode *ip); int xfs_free_eofblocks(struct xfs_inode *ip); int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip, diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 54c774af6e1c..257945cdf63b 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -2040,6 +2040,14 @@ error_free: return NULL; } +static inline void +xfs_buf_list_del( + struct xfs_buf *bp) +{ + list_del_init(&bp->b_list); + wake_up_var(&bp->b_list); +} + /* * Cancel a delayed write list. * @@ -2057,7 +2065,7 @@ xfs_buf_delwri_cancel( xfs_buf_lock(bp); bp->b_flags &= ~_XBF_DELWRI_Q; - list_del_init(&bp->b_list); + xfs_buf_list_del(bp); xfs_buf_relse(bp); } } @@ -2110,6 +2118,34 @@ xfs_buf_delwri_queue( return true; } +/* + * Queue a buffer to this delwri list as part of a data integrity operation. + * If the buffer is on any other delwri list, we'll wait for that to clear + * so that the caller can submit the buffer for IO and wait for the result. + * Callers must ensure the buffer is not already on the list. + */ +void +xfs_buf_delwri_queue_here( + struct xfs_buf *bp, + struct list_head *buffer_list) +{ + /* + * We need this buffer to end up on the /caller's/ delwri list, not any + * old list. This can happen if the buffer is marked stale (which + * clears DELWRI_Q) after the AIL queues the buffer to its list but + * before the AIL has a chance to submit the list. + */ + while (!list_empty(&bp->b_list)) { + xfs_buf_unlock(bp); + wait_var_event(&bp->b_list, list_empty(&bp->b_list)); + xfs_buf_lock(bp); + } + + ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); + + xfs_buf_delwri_queue(bp, buffer_list); +} + /* * Compare function is more complex than it needs to be because * the return value is only 32 bits and we are doing comparisons @@ -2172,7 +2208,7 @@ xfs_buf_delwri_submit_buffers( * reference and remove it from the list here. 
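/*
 * Userspace analogy, not part of the patch, for the wait in
 * xfs_buf_delwri_queue_here() above: drop the object lock, sleep until the
 * object has left whatever delwri list it is on, then retake the lock and
 * queue it locally.  wait_var_event()/wake_up_var() are modelled with a
 * pthread condition variable; every name below is illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t list_emptied = PTHREAD_COND_INITIALIZER;
static int on_other_list = 1;		/* the "buffer" starts on someone else's list */

/* The xfs_buf_list_del() side: take the object off its list, wake waiters. */
static void *remove_from_other_list(void *arg)
{
	(void)arg;
	sleep(1);			/* pretend the other submitter is busy */
	pthread_mutex_lock(&lock);
	on_other_list = 0;
	pthread_cond_broadcast(&list_emptied);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, remove_from_other_list, NULL);

	/* The xfs_buf_delwri_queue_here() side: wait for the list to clear. */
	pthread_mutex_lock(&lock);
	while (on_other_list)
		pthread_cond_wait(&list_emptied, &lock);
	printf("buffer is free; queueing it on the caller's list\n");
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}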
*/ if (!(bp->b_flags & _XBF_DELWRI_Q)) { - list_del_init(&bp->b_list); + xfs_buf_list_del(bp); xfs_buf_relse(bp); continue; } @@ -2192,7 +2228,7 @@ xfs_buf_delwri_submit_buffers( list_move_tail(&bp->b_list, wait_list); } else { bp->b_flags |= XBF_ASYNC; - list_del_init(&bp->b_list); + xfs_buf_list_del(bp); } __xfs_buf_submit(bp, false); } @@ -2246,7 +2282,7 @@ xfs_buf_delwri_submit( while (!list_empty(&wait_list)) { bp = list_first_entry(&wait_list, struct xfs_buf, b_list); - list_del_init(&bp->b_list); + xfs_buf_list_del(bp); /* * Wait on the locked buffer, check for errors and unlock and diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 549c60942208..6cf0332ba62c 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -305,6 +305,7 @@ extern void xfs_buf_stale(struct xfs_buf *bp); /* Delayed Write Buffer Routines */ extern void xfs_buf_delwri_cancel(struct list_head *); extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *); +void xfs_buf_delwri_queue_here(struct xfs_buf *bp, struct list_head *bl); extern int xfs_buf_delwri_submit(struct list_head *); extern int xfs_buf_delwri_submit_nowait(struct list_head *); extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *); diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index 7f071757f278..6186b69be50a 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -333,7 +333,6 @@ xfs_dquot_disk_alloc( goto err_cancel; ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); - ASSERT(nmaps == 1); ASSERT((map.br_startblock != DELAYSTARTBLOCK) && (map.br_startblock != HOLESTARTBLOCK)); @@ -562,7 +561,8 @@ xfs_dquot_from_disk( struct xfs_dquot *dqp, struct xfs_buf *bp) { - struct xfs_disk_dquot *ddqp = bp->b_addr + dqp->q_bufoffset; + struct xfs_dqblk *dqb = xfs_buf_offset(bp, dqp->q_bufoffset); + struct xfs_disk_dquot *ddqp = &dqb->dd_diskdq; /* * Ensure that we got the type and ID we were looking for. @@ -1250,7 +1250,7 @@ xfs_qm_dqflush( } /* Flush the incore dquot to the ondisk buffer. */ - dqblk = bp->b_addr + dqp->q_bufoffset; + dqblk = xfs_buf_offset(bp, dqp->q_bufoffset); xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp); /* diff --git a/fs/xfs/xfs_dquot_item_recover.c b/fs/xfs/xfs_dquot_item_recover.c index 8966ba842395..2c2720ce6923 100644 --- a/fs/xfs/xfs_dquot_item_recover.c +++ b/fs/xfs/xfs_dquot_item_recover.c @@ -19,6 +19,7 @@ #include "xfs_log.h" #include "xfs_log_priv.h" #include "xfs_log_recover.h" +#include "xfs_error.h" STATIC void xlog_recover_dquot_ra_pass2( @@ -65,6 +66,7 @@ xlog_recover_dquot_commit_pass2( { struct xfs_mount *mp = log->l_mp; struct xfs_buf *bp; + struct xfs_dqblk *dqb; struct xfs_disk_dquot *ddq, *recddq; struct xfs_dq_logformat *dq_f; xfs_failaddr_t fa; @@ -130,14 +132,14 @@ xlog_recover_dquot_commit_pass2( return error; ASSERT(bp); - ddq = xfs_buf_offset(bp, dq_f->qlf_boffset); + dqb = xfs_buf_offset(bp, dq_f->qlf_boffset); + ddq = &dqb->dd_diskdq; /* * If the dquot has an LSN in it, recover the dquot only if it's less * than the lsn of the transaction we are replaying. */ if (xfs_has_crc(mp)) { - struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq; xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn); if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) { @@ -147,10 +149,23 @@ xlog_recover_dquot_commit_pass2( memcpy(ddq, recddq, item->ri_buf[1].i_len); if (xfs_has_crc(mp)) { - xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk), + xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk), XFS_DQUOT_CRC_OFF); } + /* Validate the recovered dquot. 
*/ + fa = xfs_dqblk_verify(log->l_mp, dqb, dq_f->qlf_id); + if (fa) { + XFS_CORRUPTION_ERROR("Bad dquot after recovery", + XFS_ERRLEVEL_LOW, mp, dqb, + sizeof(struct xfs_dqblk)); + xfs_alert(mp, + "Metadata corruption detected at %pS, dquot 0x%x", + fa, dq_f->qlf_id); + error = -EFSCORRUPTED; + goto out_release; + } + ASSERT(dq_f->qlf_size == 2); ASSERT(bp->b_mount == mp); bp->b_flags |= _XBF_LOGRECOVERY; diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index d5130d1fcfae..be9f279a5c75 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -345,23 +345,29 @@ static int xfs_trans_free_extent( struct xfs_trans *tp, struct xfs_efd_log_item *efdp, - xfs_fsblock_t start_block, - xfs_extlen_t ext_len, - const struct xfs_owner_info *oinfo, - bool skip_discard) + struct xfs_extent_free_item *xefi) { + struct xfs_owner_info oinfo = { }; struct xfs_mount *mp = tp->t_mountp; struct xfs_extent *extp; uint next_extent; - xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, start_block); xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, - start_block); + xefi->xefi_startblock); int error; - trace_xfs_bmap_free_deferred(tp->t_mountp, agno, 0, agbno, ext_len); + oinfo.oi_owner = xefi->xefi_owner; + if (xefi->xefi_flags & XFS_EFI_ATTR_FORK) + oinfo.oi_flags |= XFS_OWNER_INFO_ATTR_FORK; + if (xefi->xefi_flags & XFS_EFI_BMBT_BLOCK) + oinfo.oi_flags |= XFS_OWNER_INFO_BMBT_BLOCK; + + trace_xfs_bmap_free_deferred(tp->t_mountp, xefi->xefi_pag->pag_agno, 0, + agbno, xefi->xefi_blockcount); + + error = __xfs_free_extent(tp, xefi->xefi_pag, agbno, + xefi->xefi_blockcount, &oinfo, xefi->xefi_agresv, + xefi->xefi_flags & XFS_EFI_SKIP_DISCARD); - error = __xfs_free_extent(tp, start_block, ext_len, - oinfo, XFS_AG_RESV_NONE, skip_discard); /* * Mark the transaction dirty, even on error. This ensures the * transaction is aborted, which: @@ -375,8 +381,8 @@ xfs_trans_free_extent( next_extent = efdp->efd_next_extent; ASSERT(next_extent < efdp->efd_format.efd_nextents); extp = &(efdp->efd_format.efd_extents[next_extent]); - extp->ext_start = start_block; - extp->ext_len = ext_len; + extp->ext_start = xefi->xefi_startblock; + extp->ext_len = xefi->xefi_blockcount; efdp->efd_next_extent++; return error; @@ -389,14 +395,13 @@ xfs_extent_free_diff_items( const struct list_head *a, const struct list_head *b) { - struct xfs_mount *mp = priv; struct xfs_extent_free_item *ra; struct xfs_extent_free_item *rb; ra = container_of(a, struct xfs_extent_free_item, xefi_list); rb = container_of(b, struct xfs_extent_free_item, xefi_list); - return XFS_FSB_TO_AGNO(mp, ra->xefi_startblock) - - XFS_FSB_TO_AGNO(mp, rb->xefi_startblock); + + return ra->xefi_pag->pag_agno - rb->xefi_pag->pag_agno; } /* Log a free extent to the intent item. 
*/ @@ -404,7 +409,7 @@ STATIC void xfs_extent_free_log_item( struct xfs_trans *tp, struct xfs_efi_log_item *efip, - struct xfs_extent_free_item *free) + struct xfs_extent_free_item *xefi) { uint next_extent; struct xfs_extent *extp; @@ -420,8 +425,8 @@ xfs_extent_free_log_item( next_extent = atomic_inc_return(&efip->efi_next_extent) - 1; ASSERT(next_extent < efip->efi_format.efi_nextents); extp = &efip->efi_format.efi_extents[next_extent]; - extp->ext_start = free->xefi_startblock; - extp->ext_len = free->xefi_blockcount; + extp->ext_start = xefi->xefi_startblock; + extp->ext_len = xefi->xefi_blockcount; } static struct xfs_log_item * @@ -433,15 +438,15 @@ xfs_extent_free_create_intent( { struct xfs_mount *mp = tp->t_mountp; struct xfs_efi_log_item *efip = xfs_efi_init(mp, count); - struct xfs_extent_free_item *free; + struct xfs_extent_free_item *xefi; ASSERT(count > 0); xfs_trans_add_item(tp, &efip->efi_item); if (sort) list_sort(mp, items, xfs_extent_free_diff_items); - list_for_each_entry(free, items, xefi_list) - xfs_extent_free_log_item(tp, efip, free); + list_for_each_entry(xefi, items, xefi_list) + xfs_extent_free_log_item(tp, efip, xefi); return &efip->efi_item; } @@ -455,6 +460,26 @@ xfs_extent_free_create_done( return &xfs_trans_get_efd(tp, EFI_ITEM(intent), count)->efd_item; } +/* Take a passive ref to the AG containing the space we're freeing. */ +void +xfs_extent_free_get_group( + struct xfs_mount *mp, + struct xfs_extent_free_item *xefi) +{ + xfs_agnumber_t agno; + + agno = XFS_FSB_TO_AGNO(mp, xefi->xefi_startblock); + xefi->xefi_pag = xfs_perag_get(mp, agno); +} + +/* Release a passive AG ref after some freeing work. */ +static inline void +xfs_extent_free_put_group( + struct xfs_extent_free_item *xefi) +{ + xfs_perag_put(xefi->xefi_pag); +} + /* Process a free extent. 
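xfs_extent_free_get_group()/xfs_extent_free_put_group() above are a plain pin/unpin pair: each queued work item holds a passive reference to its AG so the group structure stays around until the item is finished or cancelled. A minimal sketch of that lifetime rule with illustrative stand-in types, not the perag API:

/* Sketch: pin a group while a deferred work item refers to it. */
#include <stdatomic.h>
#include <assert.h>
#include <stddef.h>

struct group {
    atomic_int refcount;
};

struct work_item {
    struct group *grp;
    unsigned long long start;
    unsigned long long count;
};

static void group_get(struct group *g)
{
    atomic_fetch_add(&g->refcount, 1);
}

static void group_put(struct group *g)
{
    int old = atomic_fetch_sub(&g->refcount, 1);
    assert(old > 0);        /* catch unbalanced puts */
}

/* queue: record which group the blocks live in and pin it */
static void work_item_init(struct work_item *wi, struct group *g,
                           unsigned long long start, unsigned long long count)
{
    wi->grp = g;
    wi->start = start;
    wi->count = count;
    group_get(wi->grp);
}

/* finish or cancel: drop the pin before freeing the item */
static void work_item_done(struct work_item *wi)
{
    group_put(wi->grp);
    wi->grp = NULL;
}

int main(void)
{
    struct group ag = { .refcount = 1 };
    struct work_item wi;

    work_item_init(&wi, &ag, 100, 1);
    work_item_done(&wi);
    return 0;
}

The important property is that finish and cancel both drop the same reference, which is why the cancel path above gained the matching put.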
*/ STATIC int xfs_extent_free_finish_item( @@ -463,21 +488,15 @@ xfs_extent_free_finish_item( struct list_head *item, struct xfs_btree_cur **state) { - struct xfs_owner_info oinfo = { }; - struct xfs_extent_free_item *free; + struct xfs_extent_free_item *xefi; int error; - free = container_of(item, struct xfs_extent_free_item, xefi_list); - oinfo.oi_owner = free->xefi_owner; - if (free->xefi_flags & XFS_EFI_ATTR_FORK) - oinfo.oi_flags |= XFS_OWNER_INFO_ATTR_FORK; - if (free->xefi_flags & XFS_EFI_BMBT_BLOCK) - oinfo.oi_flags |= XFS_OWNER_INFO_BMBT_BLOCK; - error = xfs_trans_free_extent(tp, EFD_ITEM(done), - free->xefi_startblock, - free->xefi_blockcount, - &oinfo, free->xefi_flags & XFS_EFI_SKIP_DISCARD); - kmem_cache_free(xfs_extfree_item_cache, free); + xefi = container_of(item, struct xfs_extent_free_item, xefi_list); + + error = xfs_trans_free_extent(tp, EFD_ITEM(done), xefi); + + xfs_extent_free_put_group(xefi); + kmem_cache_free(xfs_extfree_item_cache, xefi); return error; } @@ -494,10 +513,12 @@ STATIC void xfs_extent_free_cancel_item( struct list_head *item) { - struct xfs_extent_free_item *free; + struct xfs_extent_free_item *xefi; - free = container_of(item, struct xfs_extent_free_item, xefi_list); - kmem_cache_free(xfs_extfree_item_cache, free); + xefi = container_of(item, struct xfs_extent_free_item, xefi_list); + + xfs_extent_free_put_group(xefi); + kmem_cache_free(xfs_extfree_item_cache, xefi); } const struct xfs_defer_op_type xfs_extent_free_defer_type = { @@ -523,28 +544,25 @@ xfs_agfl_free_finish_item( struct xfs_owner_info oinfo = { }; struct xfs_mount *mp = tp->t_mountp; struct xfs_efd_log_item *efdp = EFD_ITEM(done); - struct xfs_extent_free_item *free; + struct xfs_extent_free_item *xefi; struct xfs_extent *extp; struct xfs_buf *agbp; int error; - xfs_agnumber_t agno; xfs_agblock_t agbno; uint next_extent; - struct xfs_perag *pag; - free = container_of(item, struct xfs_extent_free_item, xefi_list); - ASSERT(free->xefi_blockcount == 1); - agno = XFS_FSB_TO_AGNO(mp, free->xefi_startblock); - agbno = XFS_FSB_TO_AGBNO(mp, free->xefi_startblock); - oinfo.oi_owner = free->xefi_owner; + xefi = container_of(item, struct xfs_extent_free_item, xefi_list); + ASSERT(xefi->xefi_blockcount == 1); + agbno = XFS_FSB_TO_AGBNO(mp, xefi->xefi_startblock); + oinfo.oi_owner = xefi->xefi_owner; - trace_xfs_agfl_free_deferred(mp, agno, 0, agbno, free->xefi_blockcount); + trace_xfs_agfl_free_deferred(mp, xefi->xefi_pag->pag_agno, 0, agbno, + xefi->xefi_blockcount); - pag = xfs_perag_get(mp, agno); - error = xfs_alloc_read_agf(pag, tp, 0, &agbp); + error = xfs_alloc_read_agf(xefi->xefi_pag, tp, 0, &agbp); if (!error) - error = xfs_free_agfl_block(tp, agno, agbno, agbp, &oinfo); - xfs_perag_put(pag); + error = xfs_free_agfl_block(tp, xefi->xefi_pag->pag_agno, + agbno, agbp, &oinfo); /* * Mark the transaction dirty, even on error. 
This ensures the @@ -559,11 +577,12 @@ xfs_agfl_free_finish_item( next_extent = efdp->efd_next_extent; ASSERT(next_extent < efdp->efd_format.efd_nextents); extp = &(efdp->efd_format.efd_extents[next_extent]); - extp->ext_start = free->xefi_startblock; - extp->ext_len = free->xefi_blockcount; + extp->ext_start = xefi->xefi_startblock; + extp->ext_len = xefi->xefi_blockcount; efdp->efd_next_extent++; - kmem_cache_free(xfs_extfree_item_cache, free); + xfs_extent_free_put_group(xefi); + kmem_cache_free(xfs_extfree_item_cache, xefi); return error; } @@ -595,11 +614,11 @@ xfs_efi_item_recover( struct xfs_log_item *lip, struct list_head *capture_list) { + struct xfs_trans_res resv; struct xfs_efi_log_item *efip = EFI_ITEM(lip); struct xfs_mount *mp = lip->li_log->l_mp; struct xfs_efd_log_item *efdp; struct xfs_trans *tp; - struct xfs_extent *extp; int i; int error = 0; @@ -618,16 +637,27 @@ xfs_efi_item_recover( } } - error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); + resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate); + error = xfs_trans_alloc(mp, &resv, 0, 0, 0, &tp); if (error) return error; efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents); for (i = 0; i < efip->efi_format.efi_nextents; i++) { + struct xfs_extent_free_item fake = { + .xefi_owner = XFS_RMAP_OWN_UNKNOWN, + .xefi_agresv = XFS_AG_RESV_NONE, + }; + struct xfs_extent *extp; + extp = &efip->efi_format.efi_extents[i]; - error = xfs_trans_free_extent(tp, efdp, extp->ext_start, - extp->ext_len, - &XFS_RMAP_OINFO_ANY_OWNER, false); + + fake.xefi_startblock = extp->ext_start; + fake.xefi_blockcount = extp->ext_len; + + xfs_extent_free_get_group(mp, &fake); + error = xfs_trans_free_extent(tp, efdp, &fake); + xfs_extent_free_put_group(&fake); if (error == -EFSCORRUPTED) XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, extp, sizeof(*extp)); diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 595a5bcf46b9..821cb86a83bd 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -214,6 +214,43 @@ xfs_ilock_iocb( return 0; } +static int +xfs_ilock_iocb_for_write( + struct kiocb *iocb, + unsigned int *lock_mode) +{ + ssize_t ret; + struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp)); + + ret = xfs_ilock_iocb(iocb, *lock_mode); + if (ret) + return ret; + + if (*lock_mode == XFS_IOLOCK_EXCL) + return 0; + if (!xfs_iflags_test(ip, XFS_IREMAPPING)) + return 0; + + xfs_iunlock(ip, *lock_mode); + *lock_mode = XFS_IOLOCK_EXCL; + return xfs_ilock_iocb(iocb, *lock_mode); +} + +static unsigned int +xfs_ilock_for_write_fault( + struct xfs_inode *ip) +{ + /* get a shared lock if no remapping in progress */ + xfs_ilock(ip, XFS_MMAPLOCK_SHARED); + if (!xfs_iflags_test(ip, XFS_IREMAPPING)) + return XFS_MMAPLOCK_SHARED; + + /* wait for remapping to complete */ + xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); + xfs_ilock(ip, XFS_MMAPLOCK_EXCL); + return XFS_MMAPLOCK_EXCL; +} + STATIC ssize_t xfs_file_dio_read( struct kiocb *iocb, @@ -523,7 +560,7 @@ xfs_file_dio_write_aligned( unsigned int iolock = XFS_IOLOCK_SHARED; ssize_t ret; - ret = xfs_ilock_iocb(iocb, iolock); + ret = xfs_ilock_iocb_for_write(iocb, &iolock); if (ret) return ret; ret = xfs_file_write_checks(iocb, from, &iolock); @@ -590,7 +627,7 @@ retry_exclusive: flags = IOMAP_DIO_FORCE_WAIT; } - ret = xfs_ilock_iocb(iocb, iolock); + ret = xfs_ilock_iocb_for_write(iocb, &iolock); if (ret) return ret; @@ -1158,9 +1195,17 @@ xfs_file_remap_range( if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out)) xfs_log_force_inode(dest); out_unlock: - 
xfs_iunlock2_io_mmap(src, dest); + xfs_iunlock2_remapping(src, dest); if (ret) trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_); + /* + * If the caller did not set CAN_SHORTEN, then it is not prepared to + * handle partial results -- either the whole remap succeeds, or we + * must say why it did not. In this case, any error should be returned + * to the caller. + */ + if (ret && remapped < len && !(remap_flags & REMAP_FILE_CAN_SHORTEN)) + return ret; return remapped > 0 ? remapped : ret; } @@ -1305,6 +1350,7 @@ __xfs_filemap_fault( struct inode *inode = file_inode(vmf->vma->vm_file); struct xfs_inode *ip = XFS_I(inode); vm_fault_t ret; + unsigned int lock_mode = 0; trace_xfs_filemap_fault(ip, pe_size, write_fault); @@ -1313,25 +1359,24 @@ __xfs_filemap_fault( file_update_time(vmf->vma->vm_file); } + if (IS_DAX(inode) || write_fault) + lock_mode = xfs_ilock_for_write_fault(XFS_I(inode)); + if (IS_DAX(inode)) { pfn_t pfn; - xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); ret = xfs_dax_fault(vmf, pe_size, write_fault, &pfn); if (ret & VM_FAULT_NEEDDSYNC) ret = dax_finish_sync_fault(vmf, pe_size, pfn); - xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); + } else if (write_fault) { + ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops); } else { - if (write_fault) { - xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); - ret = iomap_page_mkwrite(vmf, - &xfs_page_mkwrite_iomap_ops); - xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); - } else { - ret = filemap_fault(vmf); - } + ret = filemap_fault(vmf); } + if (lock_mode) + xfs_iunlock(XFS_I(inode), lock_mode); + if (write_fault) sb_end_pagefault(inode->i_sb); return ret; diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index 062e5dc5db9f..a5b9754c62d1 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c @@ -23,7 +23,7 @@ #include "xfs_refcount.h" #include "xfs_refcount_btree.h" #include "xfs_alloc_btree.h" -#include "xfs_rtalloc.h" +#include "xfs_rtbitmap.h" #include "xfs_ag.h" /* Convert an xfs_fsmap to an fsmap. */ diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 77b14f788214..96e9d64fbe62 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -153,7 +153,7 @@ xfs_growfs_data_private( (delta > 0 ? 
XFS_GROWFS_SPACE_RES(mp) : -delta), 0, XFS_TRANS_RESERVE, &tp); if (error) - return error; + goto out_free_unused_perag; last_pag = xfs_perag_get(mp, oagcount - 1); if (delta > 0) { @@ -227,6 +227,9 @@ xfs_growfs_data_private( out_trans_cancel: xfs_trans_cancel(tp); +out_free_unused_perag: + if (nagcount > oagcount) + xfs_free_unused_perag_range(mp, oagcount, nagcount); return error; } diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 6df826fc787c..586d26c05160 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -1186,7 +1186,7 @@ xfs_inode_free_eofblocks( } *lockflags |= XFS_IOLOCK_EXCL; - if (xfs_can_free_eofblocks(ip, false)) + if (xfs_can_free_eofblocks(ip)) return xfs_free_eofblocks(ip); /* inode could be preallocated or append-only */ diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 909085269227..b26d26d29273 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -919,6 +919,13 @@ xfs_droplink( xfs_trans_t *tp, xfs_inode_t *ip) { + if (VFS_I(ip)->i_nlink == 0) { + xfs_alert(ip->i_mount, + "%s: Attempt to drop inode (%llu) with nlink zero.", + __func__, ip->i_ino); + return -EFSCORRUPTED; + } + xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); drop_nlink(VFS_I(ip)); @@ -1452,7 +1459,7 @@ xfs_release( if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) return 0; - if (xfs_can_free_eofblocks(ip, false)) { + if (xfs_can_free_eofblocks(ip)) { /* * Check if the inode is being opened, written and closed * frequently and we have delayed allocation blocks outstanding @@ -1668,15 +1675,13 @@ xfs_inode_needs_inactive( /* * This file isn't being freed, so check if there are post-eof blocks - * to free. @force is true because we are evicting an inode from the - * cache. Post-eof blocks must be freed, lest we end up with broken - * free space accounting. + * to free. * * Note: don't bother with iolock here since lockdep complains about * acquiring it in reclaim context. We have the only reference to the * inode at this point anyways. */ - return xfs_can_free_eofblocks(ip, true); + return xfs_can_free_eofblocks(ip); } /* @@ -1719,20 +1724,19 @@ xfs_inactive( goto out; /* Try to clean out the cow blocks if there are any. */ - if (xfs_inode_has_cow_data(ip)) - xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true); + if (xfs_inode_has_cow_data(ip)) { + error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true); + if (error) + goto out; + } if (VFS_I(ip)->i_nlink != 0) { /* - * force is true because we are evicting an inode from the - * cache. Post-eof blocks must be freed, lest we end up with - * broken free space accounting. - * * Note: don't bother with iolock here since lockdep complains * about acquiring it in reclaim context. We have the only * reference to the inode at this point anyways. */ - if (xfs_can_free_eofblocks(ip, true)) + if (xfs_can_free_eofblocks(ip)) error = xfs_free_eofblocks(ip); goto out; @@ -3637,6 +3641,23 @@ xfs_iunlock2_io_mmap( inode_unlock(VFS_I(ip1)); } +/* Drop the MMAPLOCK and the IOLOCK after a remap completes. */ +void +xfs_iunlock2_remapping( + struct xfs_inode *ip1, + struct xfs_inode *ip2) +{ + xfs_iflags_clear(ip1, XFS_IREMAPPING); + + if (ip1 != ip2) + xfs_iunlock(ip1, XFS_MMAPLOCK_SHARED); + xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL); + + if (ip1 != ip2) + inode_unlock_shared(VFS_I(ip1)); + inode_unlock(VFS_I(ip2)); +} + /* * Reload the incore inode list for this inode. 
Caller should ensure that * the link count cannot change, either by taking ILOCK_SHARED or otherwise diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 85395ad2859c..c177c92f3aa5 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -347,6 +347,14 @@ static inline bool xfs_inode_has_large_extent_counts(struct xfs_inode *ip) /* Quotacheck is running but inode has not been added to quota counts. */ #define XFS_IQUOTAUNCHECKED (1 << 14) +/* + * Remap in progress. Callers that wish to update file data while + * holding a shared IOLOCK or MMAPLOCK must drop the lock and retake + * the lock in exclusive mode. Relocking the file will block until + * IREMAPPING is cleared. + */ +#define XFS_IREMAPPING (1U << 15) + /* All inode state flags related to inode reclaim. */ #define XFS_ALL_IRECLAIM_FLAGS (XFS_IRECLAIMABLE | \ XFS_IRECLAIM | \ @@ -561,6 +569,14 @@ extern void xfs_setup_inode(struct xfs_inode *ip); extern void xfs_setup_iops(struct xfs_inode *ip); extern void xfs_diflags_to_iflags(struct xfs_inode *ip, bool init); +static inline void xfs_update_stable_writes(struct xfs_inode *ip) +{ + if (bdev_stable_writes(xfs_inode_buftarg(ip)->bt_bdev)) + mapping_set_stable_writes(VFS_I(ip)->i_mapping); + else + mapping_clear_stable_writes(VFS_I(ip)->i_mapping); +} + /* * When setting up a newly allocated inode, we need to call * xfs_finish_inode_setup() once the inode is fully instantiated at @@ -595,6 +611,7 @@ void xfs_end_io(struct work_struct *work); int xfs_ilock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2); void xfs_iunlock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2); +void xfs_iunlock2_remapping(struct xfs_inode *ip1, struct xfs_inode *ip2); static inline bool xfs_inode_unlinked_incomplete( diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 91c847a84e10..2ec23c9af760 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -556,6 +556,9 @@ xfs_inode_to_log_dinode( memset(to->di_pad2, 0, sizeof(to->di_pad2)); uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid); to->di_v3_pad = 0; + + /* dummy value for initialisation */ + to->di_crc = 0; } else { to->di_version = 2; to->di_flushiter = ip->i_flushiter; diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c index e6609067ef26..144198a6b270 100644 --- a/fs/xfs/xfs_inode_item_recover.c +++ b/fs/xfs/xfs_inode_item_recover.c @@ -286,6 +286,7 @@ xlog_recover_inode_commit_pass2( struct xfs_log_dinode *ldip; uint isize; int need_free = 0; + xfs_failaddr_t fa; if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) { in_f = item->ri_buf[0].i_addr; @@ -530,8 +531,19 @@ out_owner_change: (dip->di_mode != 0)) error = xfs_recover_inode_owner_change(mp, dip, in_f, buffer_list); - /* re-generate the checksum. */ + /* re-generate the checksum and validate the recovered inode. 
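The XFS_IREMAPPING comment above prescribes a shared-then-exclusive relock: a writer that finds the flag set while holding a shared lock drops it and blocks on the exclusive lock, which cannot be granted until the remap finishes. A pthread rwlock sketch of that rule; struct inode_sketch and the remapping flag are stand-ins for the inode locks and inode flag:

/* Sketch of the "relock exclusively while a remap is in flight" rule. */
#include <pthread.h>
#include <stdbool.h>

struct inode_sketch {
    pthread_rwlock_t iolock;
    bool             remapping;     /* set while a remap holds the lock shared */
};

/* Returns true if we ended up with the exclusive lock, false for shared. */
bool lock_for_write(struct inode_sketch *ip)
{
    pthread_rwlock_rdlock(&ip->iolock);
    if (!ip->remapping)
        return false;               /* shared lock is enough */

    /* a remap is in flight: drop shared, block until we get exclusive */
    pthread_rwlock_unlock(&ip->iolock);
    pthread_rwlock_wrlock(&ip->iolock);
    return true;
}

int main(void)
{
    struct inode_sketch ip = { .iolock = PTHREAD_RWLOCK_INITIALIZER };
    bool excl = lock_for_write(&ip);

    pthread_rwlock_unlock(&ip.iolock);
    return excl ? 0 : 0;
}

Once the exclusive lock is obtained no recheck of the flag is needed, since exclusive access is sufficient in either case.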
*/ xfs_dinode_calc_crc(log->l_mp, dip); + fa = xfs_dinode_verify(log->l_mp, in_f->ilf_ino, dip); + if (fa) { + XFS_CORRUPTION_ERROR( + "Bad dinode after recovery", + XFS_ERRLEVEL_LOW, mp, dip, sizeof(*dip)); + xfs_alert(mp, + "Metadata corruption detected at %pS, inode 0x%llx", + fa, in_f->ilf_ino); + error = -EFSCORRUPTED; + goto out_release; + } ASSERT(bp->b_mount == mp); bp->b_flags |= _XBF_LOGRECOVERY; diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 85fbb3b71d1c..c7cb496dc345 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1120,23 +1120,25 @@ xfs_ioctl_setattr_xflags( struct fileattr *fa) { struct xfs_mount *mp = ip->i_mount; + bool rtflag = (fa->fsx_xflags & FS_XFLAG_REALTIME); uint64_t i_flags2; - /* Can't change realtime flag if any extents are allocated. */ - if ((ip->i_df.if_nextents || ip->i_delayed_blks) && - XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME)) - return -EINVAL; - - /* If realtime flag is set then must have realtime device */ - if (fa->fsx_xflags & FS_XFLAG_REALTIME) { - if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 || - (ip->i_extsize % mp->m_sb.sb_rextsize)) + if (rtflag != XFS_IS_REALTIME_INODE(ip)) { + /* Can't change realtime flag if any extents are allocated. */ + if (ip->i_df.if_nextents || ip->i_delayed_blks) return -EINVAL; } - /* Clear reflink if we are actually able to set the rt flag. */ - if ((fa->fsx_xflags & FS_XFLAG_REALTIME) && xfs_is_reflink_inode(ip)) - ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK; + if (rtflag) { + /* If realtime flag is set then must have realtime device */ + if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 || + (ip->i_extsize % mp->m_sb.sb_rextsize)) + return -EINVAL; + + /* Clear reflink if we are actually able to set the rt flag. */ + if (xfs_is_reflink_inode(ip)) + ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK; + } /* Don't allow us to set DAX mode for a reflinked file for now. */ if ((fa->fsx_xflags & FS_XFLAG_DAX) && xfs_is_reflink_inode(ip)) @@ -1151,6 +1153,14 @@ xfs_ioctl_setattr_xflags( ip->i_diflags2 = i_flags2; xfs_diflags_to_iflags(ip, false); + + /* + * Make the stable writes flag match that of the device the inode + * resides on when flipping the RT flag. + */ + if (rtflag != XFS_IS_REALTIME_INODE(ip) && S_ISREG(VFS_I(ip)->i_mode)) + xfs_update_stable_writes(ip); + xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG); xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); XFS_STATS_INC(mp, xs_ig_attrchg); diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index ab5512c0bcf7..28a1c19dfdb3 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -309,14 +309,6 @@ xfs_iomap_write_direct( if (error) goto out_unlock; - /* - * Copy any maps to caller's array and return any error. - */ - if (nimaps == 0) { - error = -ENOSPC; - goto out_unlock; - } - if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) error = xfs_alert_fsblock_zero(ip, imap); @@ -1004,6 +996,24 @@ xfs_buffered_write_iomap_begin( goto out_unlock; } + /* + * For zeroing, trim a delalloc extent that extends beyond the EOF + * block. If it starts beyond the EOF block, convert it to an + * unwritten extent. 
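The zeroing comment above reduces to a small block-number decision: a delalloc mapping that starts at or past the EOF block is converted rather than written, and one that merely straddles EOF is trimmed back to it. A sketch of that arithmetic in file system block units, with stand-in names for the kernel variables:

/* Sketch of the IOMAP_ZERO trim/convert decision described above. */
#include <stdio.h>

enum zero_action { ZERO_WRITE, ZERO_WRITE_TRIMMED, ZERO_CONVERT };

static enum zero_action classify(unsigned long long offset_fsb,
                                 unsigned long long *end_fsb,
                                 unsigned long long eof_fsb)
{
    if (offset_fsb >= eof_fsb)
        return ZERO_CONVERT;        /* starts at or after the EOF block */
    if (*end_fsb > eof_fsb) {
        *end_fsb = eof_fsb;         /* trim back to the EOF block */
        return ZERO_WRITE_TRIMMED;
    }
    return ZERO_WRITE;
}

int main(void)
{
    unsigned long long end = 12;

    printf("%d\n", classify(4, &end, 8));   /* straddles EOF: trimmed */
    printf("end is now %llu\n", end);       /* 8 */
    printf("%d\n", classify(9, &end, 8));   /* wholly post-EOF: convert */
    return 0;
}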
+ */ + if ((flags & IOMAP_ZERO) && imap.br_startoff <= offset_fsb && + isnullstartblock(imap.br_startblock)) { + xfs_fileoff_t eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip)); + + if (offset_fsb >= eof_fsb) + goto convert_delay; + if (end_fsb > eof_fsb) { + end_fsb = eof_fsb; + xfs_trim_extent(&imap, offset_fsb, + end_fsb - offset_fsb); + } + } + /* * Search the COW fork extent list even if we did not find a data fork * extent. This serves two purposes: first this implements the @@ -1105,47 +1115,48 @@ xfs_buffered_write_iomap_begin( } } -retry: - error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb, - end_fsb - offset_fsb, prealloc_blocks, - allocfork == XFS_DATA_FORK ? &imap : &cmap, - allocfork == XFS_DATA_FORK ? &icur : &ccur, - allocfork == XFS_DATA_FORK ? eof : cow_eof); - switch (error) { - case 0: - break; - case -ENOSPC: - case -EDQUOT: - /* retry without any preallocation */ - trace_xfs_delalloc_enospc(ip, offset, count); - if (prealloc_blocks) { - prealloc_blocks = 0; - goto retry; - } - fallthrough; - default: - goto out_unlock; - } - if (allocfork == XFS_COW_FORK) { + error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb, + end_fsb - offset_fsb, prealloc_blocks, &cmap, + &ccur, cow_eof); + if (error) + goto out_unlock; + trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap); goto found_cow; } + error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb, + end_fsb - offset_fsb, prealloc_blocks, &imap, &icur, + eof); + if (error) + goto out_unlock; + /* * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch * them out if the write happens to fail. */ seq = xfs_iomap_inode_sequence(ip, IOMAP_F_NEW); - xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_iunlock(ip, lockmode); trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap); return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW, seq); found_imap: seq = xfs_iomap_inode_sequence(ip, 0); - xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_iunlock(ip, lockmode); return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq); +convert_delay: + xfs_iunlock(ip, lockmode); + truncate_pagecache(inode, offset); + error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset, + iomap, NULL); + if (error) + return error; + + trace_xfs_iomap_alloc(ip, offset, count, XFS_DATA_FORK, &imap); + return 0; + found_cow: seq = xfs_iomap_inode_sequence(ip, 0); if (imap.br_startoff <= offset_fsb) { @@ -1153,17 +1164,17 @@ found_cow: if (error) goto out_unlock; seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED); - xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_iunlock(ip, lockmode); return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq); } xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb); - xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_iunlock(ip, lockmode); return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0, seq); out_unlock: - xfs_iunlock(ip, XFS_ILOCK_EXCL); + xfs_iunlock(ip, lockmode); return error; } diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 2e10e1c66ad6..6fbdc0a19e54 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -1291,6 +1291,13 @@ xfs_setup_inode( gfp_mask = mapping_gfp_mask(inode->i_mapping); mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS))); + /* + * For real-time inodes update the stable write flags to that of the RT + * device instead of the data device. 
+ */ + if (S_ISREG(inode->i_mode) && XFS_IS_REALTIME_INODE(ip)) + xfs_update_stable_writes(ip); + /* * If there is no attribute fork no ACL can exist on this inode, * and it can't have any file capabilities attached to it either. diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 59c982297503..ce6b303484cf 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -1891,9 +1891,7 @@ xlog_write_iclog( * the buffer manually, the code needs to be kept in sync * with the I/O completion path. */ - xlog_state_done_syncing(iclog); - up(&iclog->ic_sema); - return; + goto sync; } /* @@ -1923,20 +1921,17 @@ xlog_write_iclog( * avoid shutdown re-entering this path and erroring out again. */ if (log->l_targ != log->l_mp->m_ddev_targp && - blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) { - xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); - return; - } + blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev)) + goto shutdown; } if (iclog->ic_flags & XLOG_ICL_NEED_FUA) iclog->ic_bio.bi_opf |= REQ_FUA; iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA); - if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) { - xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); - return; - } + if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) + goto shutdown; + if (is_vmalloc_addr(iclog->ic_data)) flush_kernel_vmap_range(iclog->ic_data, count); @@ -1957,6 +1952,12 @@ xlog_write_iclog( } submit_bio(&iclog->ic_bio); + return; +shutdown: + xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR); +sync: + xlog_state_done_syncing(iclog); + up(&iclog->ic_sema); } /* diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index affe94356ed1..e009bb23d8a2 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -2439,7 +2439,10 @@ xlog_recover_process_data( ohead = (struct xlog_op_header *)dp; dp += sizeof(*ohead); - ASSERT(dp <= end); + if (dp > end) { + xfs_warn(log->l_mp, "%s: op header overrun", __func__); + return -EFSCORRUPTED; + } /* errors will abort recovery */ error = xlog_recover_process_ophdr(log, rhash, rhead, ohead, @@ -2511,7 +2514,7 @@ xlog_abort_defer_ops( list_for_each_entry_safe(dfc, next, capture_list, dfc_list) { list_del_init(&dfc->dfc_list); - xfs_defer_ops_capture_free(mp, dfc); + xfs_defer_ops_capture_abort(mp, dfc); } } diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c index b77673dd0558..26b2c449f3c6 100644 --- a/fs/xfs/xfs_qm_bhv.c +++ b/fs/xfs/xfs_qm_bhv.c @@ -19,28 +19,41 @@ STATIC void xfs_fill_statvfs_from_dquot( struct kstatfs *statp, + struct xfs_inode *ip, struct xfs_dquot *dqp) { + struct xfs_dquot_res *blkres = &dqp->q_blk; uint64_t limit; - limit = dqp->q_blk.softlimit ? - dqp->q_blk.softlimit : - dqp->q_blk.hardlimit; - if (limit && statp->f_blocks > limit) { - statp->f_blocks = limit; - statp->f_bfree = statp->f_bavail = - (statp->f_blocks > dqp->q_blk.reserved) ? - (statp->f_blocks - dqp->q_blk.reserved) : 0; + if (XFS_IS_REALTIME_MOUNT(ip->i_mount) && + (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) + blkres = &dqp->q_rtb; + + limit = blkres->softlimit ? + blkres->softlimit : + blkres->hardlimit; + if (limit) { + uint64_t remaining = 0; + + if (limit > blkres->reserved) + remaining = limit - blkres->reserved; + + statp->f_blocks = min(statp->f_blocks, limit); + statp->f_bfree = min(statp->f_bfree, remaining); + statp->f_bavail = min(statp->f_bavail, remaining); } limit = dqp->q_ino.softlimit ? 
dqp->q_ino.softlimit : dqp->q_ino.hardlimit; - if (limit && statp->f_files > limit) { - statp->f_files = limit; - statp->f_ffree = - (statp->f_files > dqp->q_ino.reserved) ? - (statp->f_files - dqp->q_ino.reserved) : 0; + if (limit) { + uint64_t remaining = 0; + + if (limit > dqp->q_ino.reserved) + remaining = limit - dqp->q_ino.reserved; + + statp->f_files = min(statp->f_files, limit); + statp->f_ffree = min(statp->f_ffree, remaining); } } @@ -61,7 +74,7 @@ xfs_qm_statvfs( struct xfs_dquot *dqp; if (!xfs_qm_dqget(mp, ip->i_projid, XFS_DQTYPE_PROJ, false, &dqp)) { - xfs_fill_statvfs_from_dquot(statp, dqp); + xfs_fill_statvfs_from_dquot(statp, ip, dqp); xfs_qm_dqput(dqp); } } diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index 858e3e9eb4a8..dfd7b824e32b 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c @@ -252,17 +252,12 @@ static int xfs_trans_log_finish_refcount_update( struct xfs_trans *tp, struct xfs_cud_log_item *cudp, - enum xfs_refcount_intent_type type, - xfs_fsblock_t startblock, - xfs_extlen_t blockcount, - xfs_fsblock_t *new_fsb, - xfs_extlen_t *new_len, + struct xfs_refcount_intent *ri, struct xfs_btree_cur **pcur) { int error; - error = xfs_refcount_finish_one(tp, type, startblock, - blockcount, new_fsb, new_len, pcur); + error = xfs_refcount_finish_one(tp, ri, pcur); /* * Mark the transaction dirty, even on error. This ensures the @@ -378,25 +373,20 @@ xfs_refcount_update_finish_item( struct list_head *item, struct xfs_btree_cur **state) { - struct xfs_refcount_intent *refc; - xfs_fsblock_t new_fsb; - xfs_extlen_t new_aglen; + struct xfs_refcount_intent *ri; int error; - refc = container_of(item, struct xfs_refcount_intent, ri_list); - error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done), - refc->ri_type, refc->ri_startblock, refc->ri_blockcount, - &new_fsb, &new_aglen, state); + ri = container_of(item, struct xfs_refcount_intent, ri_list); + error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done), ri, + state); /* Did we run out of reservation? Requeue what we didn't finish. */ - if (!error && new_aglen > 0) { - ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE || - refc->ri_type == XFS_REFCOUNT_DECREASE); - refc->ri_startblock = new_fsb; - refc->ri_blockcount = new_aglen; + if (!error && ri->ri_blockcount > 0) { + ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE || + ri->ri_type == XFS_REFCOUNT_DECREASE); return -EAGAIN; } - kmem_cache_free(xfs_refcount_intent_cache, refc); + kmem_cache_free(xfs_refcount_intent_cache, ri); return error; } @@ -463,18 +453,14 @@ xfs_cui_item_recover( struct xfs_log_item *lip, struct list_head *capture_list) { - struct xfs_bmbt_irec irec; + struct xfs_trans_res resv; struct xfs_cui_log_item *cuip = CUI_ITEM(lip); - struct xfs_phys_extent *refc; struct xfs_cud_log_item *cudp; struct xfs_trans *tp; struct xfs_btree_cur *rcur = NULL; struct xfs_mount *mp = lip->li_log->l_mp; - xfs_fsblock_t new_fsb; - xfs_extlen_t new_len; unsigned int refc_type; bool requeue_only = false; - enum xfs_refcount_intent_type type; int i; int error = 0; @@ -505,14 +491,18 @@ xfs_cui_item_recover( * doesn't fit. We need to reserve enough blocks to handle a * full btree split on either end of the refcount range. 
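The xfs_fill_statvfs_from_dquot() changes a few hunks above clamp the statfs numbers with min() instead of overwriting them: the quota limit caps f_blocks, and whatever is still reservable under that limit caps f_bfree and f_bavail. A sketch of that arithmetic; the struct and field names are simplified stand-ins for struct kstatfs:

/* Sketch of the quota clamping applied to statfs output. */
#include <stdio.h>

struct statfs_sketch {
    unsigned long long f_blocks, f_bfree, f_bavail;
};

static unsigned long long min_u64(unsigned long long a, unsigned long long b)
{
    return a < b ? a : b;
}

static void clamp_to_quota(struct statfs_sketch *st,
                           unsigned long long limit,
                           unsigned long long reserved)
{
    unsigned long long remaining = 0;

    if (!limit)
        return;                     /* no limit configured */
    if (limit > reserved)
        remaining = limit - reserved;

    st->f_blocks = min_u64(st->f_blocks, limit);
    st->f_bfree  = min_u64(st->f_bfree, remaining);
    st->f_bavail = min_u64(st->f_bavail, remaining);
}

int main(void)
{
    struct statfs_sketch st = { .f_blocks = 1000, .f_bfree = 900, .f_bavail = 900 };

    clamp_to_quota(&st, 100, 40);   /* limit 100 blocks, 40 already reserved */
    printf("%llu %llu %llu\n", st.f_blocks, st.f_bfree, st.f_bavail);
    /* prints: 100 60 60 */
    return 0;
}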
*/ - error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, - mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp); + resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate); + error = xfs_trans_alloc(mp, &resv, mp->m_refc_maxlevels * 2, 0, + XFS_TRANS_RESERVE, &tp); if (error) return error; cudp = xfs_trans_get_cud(tp, cuip); for (i = 0; i < cuip->cui_format.cui_nextents; i++) { + struct xfs_refcount_intent fake = { }; + struct xfs_phys_extent *refc; + refc = &cuip->cui_format.cui_extents[i]; refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK; switch (refc_type) { @@ -520,7 +510,7 @@ xfs_cui_item_recover( case XFS_REFCOUNT_DECREASE: case XFS_REFCOUNT_ALLOC_COW: case XFS_REFCOUNT_FREE_COW: - type = refc_type; + fake.ri_type = refc_type; break; default: XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, @@ -529,13 +519,12 @@ xfs_cui_item_recover( error = -EFSCORRUPTED; goto abort_error; } - if (requeue_only) { - new_fsb = refc->pe_startblock; - new_len = refc->pe_len; - } else + + fake.ri_startblock = refc->pe_startblock; + fake.ri_blockcount = refc->pe_len; + if (!requeue_only) error = xfs_trans_log_finish_refcount_update(tp, cudp, - type, refc->pe_startblock, refc->pe_len, - &new_fsb, &new_len, &rcur); + &fake, &rcur); if (error == -EFSCORRUPTED) XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, &cuip->cui_format, @@ -544,10 +533,13 @@ xfs_cui_item_recover( goto abort_error; /* Requeue what we didn't finish. */ - if (new_len > 0) { - irec.br_startblock = new_fsb; - irec.br_blockcount = new_len; - switch (type) { + if (fake.ri_blockcount > 0) { + struct xfs_bmbt_irec irec = { + .br_startblock = fake.ri_startblock, + .br_blockcount = fake.ri_blockcount, + }; + + switch (fake.ri_type) { case XFS_REFCOUNT_INCREASE: xfs_refcount_increase_extent(tp, &irec); break; diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index fe46bce8cae6..d539487eaf1a 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -431,13 +431,6 @@ xfs_reflink_fill_cow_hole( if (error) return error; - /* - * Allocation succeeded but the requested range was not even partially - * satisfied? Bail out! - */ - if (nimaps == 0) - return -ENOSPC; - convert: return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now); @@ -500,13 +493,6 @@ xfs_reflink_fill_delalloc( error = xfs_trans_commit(tp); if (error) return error; - - /* - * Allocation succeeded but the requested range was not even - * partially satisfied? Bail out! - */ - if (nimaps == 0) - return -ENOSPC; } while (cmap->br_startoff + cmap->br_blockcount <= imap->br_startoff); return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now); @@ -618,8 +604,11 @@ xfs_reflink_cancel_cow_blocks( xfs_refcount_free_cow_extent(*tpp, del.br_startblock, del.br_blockcount); - xfs_free_extent_later(*tpp, del.br_startblock, - del.br_blockcount, NULL); + error = xfs_free_extent_later(*tpp, del.br_startblock, + del.br_blockcount, NULL, + XFS_AG_RESV_NONE); + if (error) + break; /* Roll the transaction */ error = xfs_defer_finish(tpp); @@ -729,12 +718,6 @@ xfs_reflink_end_cow_extent( int nmaps; int error; - /* No COW extents? That's easy! */ - if (ifp->if_bytes == 0) { - *offset_fsb = end_fsb; - return 0; - } - resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, XFS_TRANS_RESERVE, &tp); @@ -783,6 +766,7 @@ xfs_reflink_end_cow_extent( } } del = got; + xfs_trim_extent(&del, *offset_fsb, end_fsb - *offset_fsb); /* Grab the corresponding mapping in the data fork. 
*/ nmaps = 1; @@ -1539,6 +1523,10 @@ xfs_reflink_remap_prep( if (ret) goto out_unlock; + xfs_iflags_set(src, XFS_IREMAPPING); + if (inode_in != inode_out) + xfs_ilock_demote(src, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL); + return 0; out_unlock: xfs_iunlock2_io_mmap(src, dest); diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c index 534504ede1a3..2043cea261c0 100644 --- a/fs/xfs/xfs_rmap_item.c +++ b/fs/xfs/xfs_rmap_item.c @@ -492,6 +492,7 @@ xfs_rui_item_recover( struct xfs_log_item *lip, struct list_head *capture_list) { + struct xfs_trans_res resv; struct xfs_rui_log_item *ruip = RUI_ITEM(lip); struct xfs_map_extent *rmap; struct xfs_rud_log_item *rudp; @@ -519,8 +520,9 @@ xfs_rui_item_recover( } } - error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, - mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp); + resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate); + error = xfs_trans_alloc(mp, &resv, mp->m_rmap_maxlevels, 0, + XFS_TRANS_RESERVE, &tp); if (error) return error; rudp = xfs_trans_get_rud(tp, ruip); diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c index 292d5e54a92c..5cf1e91f4c20 100644 --- a/fs/xfs/xfs_rtalloc.c +++ b/fs/xfs/xfs_rtalloc.c @@ -19,6 +19,7 @@ #include "xfs_icache.h" #include "xfs_rtalloc.h" #include "xfs_sb.h" +#include "xfs_rtbitmap.h" /* * Read and return the summary information for a given extent size, @@ -211,6 +212,23 @@ xfs_rtallocate_range( return error; } +/* + * Make sure we don't run off the end of the rt volume. Be careful that + * adjusting maxlen downwards doesn't cause us to fail the alignment checks. + */ +static inline xfs_extlen_t +xfs_rtallocate_clamp_len( + struct xfs_mount *mp, + xfs_rtblock_t startrtx, + xfs_extlen_t rtxlen, + xfs_extlen_t prod) +{ + xfs_extlen_t ret; + + ret = min(mp->m_sb.sb_rextents, startrtx + rtxlen) - startrtx; + return rounddown(ret, prod); +} + /* * Attempt to allocate an extent minlen<=len<=maxlen starting from * bitmap block bbno. If we don't get maxlen then use prod to trim @@ -248,7 +266,7 @@ xfs_rtallocate_extent_block( i <= end; i++) { /* Make sure we don't scan off the end of the rt volume. */ - maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i; + maxlen = xfs_rtallocate_clamp_len(mp, i, maxlen, prod); /* * See if there's a free extent of maxlen starting at i. @@ -300,7 +318,7 @@ xfs_rtallocate_extent_block( /* * Searched the whole thing & didn't find a maxlen free extent. */ - if (minlen < maxlen && besti != -1) { + if (minlen <= maxlen && besti != -1) { xfs_extlen_t p; /* amount to trim length by */ /* @@ -355,7 +373,8 @@ xfs_rtallocate_extent_exact( int isfree; /* extent is free */ xfs_rtblock_t next; /* next block to try (dummy) */ - ASSERT(minlen % prod == 0 && maxlen % prod == 0); + ASSERT(minlen % prod == 0); + ASSERT(maxlen % prod == 0); /* * Check if the range in question (for maxlen) is free. */ @@ -438,7 +457,9 @@ xfs_rtallocate_extent_near( xfs_rtblock_t n; /* next block to try */ xfs_rtblock_t r; /* result block */ - ASSERT(minlen % prod == 0 && maxlen % prod == 0); + ASSERT(minlen % prod == 0); + ASSERT(maxlen % prod == 0); + /* * If the block number given is off the end, silently set it to * the last block. @@ -447,7 +468,7 @@ xfs_rtallocate_extent_near( bno = mp->m_sb.sb_rextents - 1; /* Make sure we don't run off the end of the rt volume. 
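xfs_rtallocate_clamp_len() above does two things at once: clamp the candidate length so it cannot run past the end of the rt volume, and round the result down to the allocation unit so the later alignment asserts still hold. A sketch of the same arithmetic with plain integer types:

/* Sketch of the clamp-then-round-down length calculation. */
#include <stdio.h>

static unsigned long long clamp_rt_len(unsigned long long rextents,
                                       unsigned long long start,
                                       unsigned long long len,
                                       unsigned long long prod)
{
    unsigned long long end = start + len;

    if (end > rextents)
        end = rextents;                     /* min(rextents, start + len) */
    return ((end - start) / prod) * prod;   /* rounddown(..., prod) */
}

int main(void)
{
    /* 100-extent volume, request of 16 starting at 90, 4-extent units */
    printf("%llu\n", clamp_rt_len(100, 90, 16, 4));     /* prints 8 */
    return 0;
}

Without the round-down, clamping 16 to 10 here would trip the "maxlen % prod == 0" assertions that the same patch splits out above.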
*/ - maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno; + maxlen = xfs_rtallocate_clamp_len(mp, bno, maxlen, prod); if (maxlen < minlen) { *rtblock = NULLRTBLOCK; return 0; @@ -638,7 +659,8 @@ xfs_rtallocate_extent_size( xfs_rtblock_t r; /* result block number */ xfs_suminfo_t sum; /* summary information for extents */ - ASSERT(minlen % prod == 0 && maxlen % prod == 0); + ASSERT(minlen % prod == 0); + ASSERT(maxlen % prod == 0); ASSERT(maxlen != 0); /* @@ -818,8 +840,6 @@ xfs_growfs_rt_alloc( nmap = 1; error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks, XFS_BMAPI_METADATA, 0, &map, &nmap); - if (!error && nmap < 1) - error = -ENOSPC; if (error) goto out_trans_cancel; /* @@ -954,7 +974,7 @@ xfs_growfs_rt( return -EINVAL; /* Unsupported realtime features. */ - if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp)) + if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp) || xfs_has_quota(mp)) return -EOPNOTSUPP; nrblocks = in->newblocks; @@ -976,8 +996,10 @@ xfs_growfs_rt( */ nrextents = nrblocks; do_div(nrextents, in->extsize); + if (!xfs_validate_rtextents(nrextents)) + return -EINVAL; nrbmblocks = howmany_64(nrextents, NBBY * sbp->sb_blocksize); - nrextslog = xfs_highbit32(nrextents); + nrextslog = xfs_compute_rextslog(nrextents); nrsumlevels = nrextslog + 1; nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nrbmblocks; nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize); @@ -1039,13 +1061,16 @@ xfs_growfs_rt( nsbp->sb_rextents = nsbp->sb_rblocks; do_div(nsbp->sb_rextents, nsbp->sb_rextsize); ASSERT(nsbp->sb_rextents != 0); - nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents); + nsbp->sb_rextslog = xfs_compute_rextslog(nsbp->sb_rextents); nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1; nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nsbp->sb_rbmblocks; nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize); nmp->m_rsumsize = nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks); + /* recompute growfsrt reservation from new rsumsize */ + xfs_trans_resv_calc(nmp, &nmp->m_resv); + /* * Start a transaction, get the log reservation. */ @@ -1129,6 +1154,8 @@ error_cancel: */ mp->m_rsumlevels = nrsumlevels; mp->m_rsumsize = nrsumsize; + /* recompute growfsrt reservation from new rsumsize */ + xfs_trans_resv_calc(mp, &mp->m_resv); error = xfs_trans_commit(tp); if (error) diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h index 62c7ad79cbb6..11859c259a1c 100644 --- a/fs/xfs/xfs_rtalloc.h +++ b/fs/xfs/xfs_rtalloc.h @@ -11,22 +11,6 @@ struct xfs_mount; struct xfs_trans; -/* - * XXX: Most of the realtime allocation functions deal in units of realtime - * extents, not realtime blocks. This looks funny when paired with the type - * name and screams for a larger cleanup. - */ -struct xfs_rtalloc_rec { - xfs_rtblock_t ar_startext; - xfs_rtblock_t ar_extcount; -}; - -typedef int (*xfs_rtalloc_query_range_fn)( - struct xfs_mount *mp, - struct xfs_trans *tp, - const struct xfs_rtalloc_rec *rec, - void *priv); - #ifdef CONFIG_XFS_RT /* * Function prototypes for exported functions. @@ -48,15 +32,6 @@ xfs_rtallocate_extent( xfs_extlen_t prod, /* extent product factor */ xfs_rtblock_t *rtblock); /* out: start block allocated */ -/* - * Free an extent in the realtime subvolume. Length is expressed in - * realtime extents, as is the block number. - */ -int /* error */ -xfs_rtfree_extent( - struct xfs_trans *tp, /* transaction pointer */ - xfs_rtblock_t bno, /* starting block number to free */ - xfs_extlen_t len); /* length of extent freed */ /* * Initialize realtime fields in the mount structure. 
@@ -98,55 +73,12 @@ xfs_growfs_rt( struct xfs_mount *mp, /* file system mount structure */ xfs_growfs_rt_t *in); /* user supplied growfs struct */ -/* - * From xfs_rtbitmap.c - */ -int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp, - xfs_rtblock_t block, int issum, struct xfs_buf **bpp); -int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp, - xfs_rtblock_t start, xfs_extlen_t len, int val, - xfs_rtblock_t *new, int *stat); -int xfs_rtfind_back(struct xfs_mount *mp, struct xfs_trans *tp, - xfs_rtblock_t start, xfs_rtblock_t limit, - xfs_rtblock_t *rtblock); -int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp, - xfs_rtblock_t start, xfs_rtblock_t limit, - xfs_rtblock_t *rtblock); -int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp, - xfs_rtblock_t start, xfs_extlen_t len, int val); -int xfs_rtmodify_summary_int(struct xfs_mount *mp, struct xfs_trans *tp, - int log, xfs_rtblock_t bbno, int delta, - struct xfs_buf **rbpp, xfs_fsblock_t *rsb, - xfs_suminfo_t *sum); -int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log, - xfs_rtblock_t bbno, int delta, struct xfs_buf **rbpp, - xfs_fsblock_t *rsb); -int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp, - xfs_rtblock_t start, xfs_extlen_t len, - struct xfs_buf **rbpp, xfs_fsblock_t *rsb); -int xfs_rtalloc_query_range(struct xfs_mount *mp, struct xfs_trans *tp, - const struct xfs_rtalloc_rec *low_rec, - const struct xfs_rtalloc_rec *high_rec, - xfs_rtalloc_query_range_fn fn, void *priv); -int xfs_rtalloc_query_all(struct xfs_mount *mp, struct xfs_trans *tp, - xfs_rtalloc_query_range_fn fn, - void *priv); -bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno); -int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp, - xfs_rtblock_t start, xfs_extlen_t len, - bool *is_free); int xfs_rtalloc_reinit_frextents(struct xfs_mount *mp); #else -# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (ENOSYS) -# define xfs_rtfree_extent(t,b,l) (ENOSYS) -# define xfs_rtpick_extent(m,t,l,rb) (ENOSYS) -# define xfs_growfs_rt(mp,in) (ENOSYS) -# define xfs_rtalloc_query_range(t,l,h,f,p) (ENOSYS) -# define xfs_rtalloc_query_all(m,t,f,p) (ENOSYS) -# define xfs_rtbuf_get(m,t,b,i,p) (ENOSYS) -# define xfs_verify_rtbno(m, r) (false) -# define xfs_rtalloc_extent_is_free(m,t,s,l,i) (ENOSYS) -# define xfs_rtalloc_reinit_frextents(m) (0) +# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb) (-ENOSYS) +# define xfs_rtpick_extent(m,t,l,rb) (-ENOSYS) +# define xfs_growfs_rt(mp,in) (-ENOSYS) +# define xfs_rtalloc_reinit_frextents(m) (0) static inline int /* error */ xfs_rtmount_init( xfs_mount_t *mp) /* file system mount structure */ @@ -157,7 +89,7 @@ xfs_rtmount_init( xfs_warn(mp, "Not built with CONFIG_XFS_RT"); return -ENOSYS; } -# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS)) +# define xfs_rtmount_inodes(m) (((mp)->m_sb.sb_rblocks == 0)? 
0 : (-ENOSYS)) # define xfs_rtunmount_inodes(m) #endif /* CONFIG_XFS_RT */ diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 1c143c69da6e..2ef331132fca 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -849,12 +849,6 @@ xfs_fs_statfs( ffree = statp->f_files - (icount - ifree); statp->f_ffree = max_t(int64_t, ffree, 0); - - if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) && - ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) == - (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD)) - xfs_qm_statvfs(ip, statp); - if (XFS_IS_REALTIME_MOUNT(mp) && (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) { s64 freertx; @@ -864,6 +858,11 @@ xfs_fs_statfs( statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize; } + if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) && + ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) == + (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD)) + xfs_qm_statvfs(ip, statp); + return 0; } diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 0cd62031e53f..20e2ec8b73aa 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -3208,17 +3208,14 @@ DEFINE_REFCOUNT_DEFERRED_EVENT(xfs_refcount_deferred); TRACE_EVENT(xfs_refcount_finish_one_leftover, TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, - int type, xfs_agblock_t agbno, xfs_extlen_t len, - xfs_agblock_t new_agbno, xfs_extlen_t new_len), - TP_ARGS(mp, agno, type, agbno, len, new_agbno, new_len), + int type, xfs_agblock_t agbno, xfs_extlen_t len), + TP_ARGS(mp, agno, type, agbno, len), TP_STRUCT__entry( __field(dev_t, dev) __field(xfs_agnumber_t, agno) __field(int, type) __field(xfs_agblock_t, agbno) __field(xfs_extlen_t, len) - __field(xfs_agblock_t, new_agbno) - __field(xfs_extlen_t, new_len) ), TP_fast_assign( __entry->dev = mp->m_super->s_dev; @@ -3226,17 +3223,13 @@ TRACE_EVENT(xfs_refcount_finish_one_leftover, __entry->type = type; __entry->agbno = agbno; __entry->len = len; - __entry->new_agbno = new_agbno; - __entry->new_len = new_len; ), - TP_printk("dev %d:%d type %d agno 0x%x agbno 0x%x fsbcount 0x%x new_agbno 0x%x new_fsbcount 0x%x", + TP_printk("dev %d:%d type %d agno 0x%x agbno 0x%x fsbcount 0x%x", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->type, __entry->agno, __entry->agbno, - __entry->len, - __entry->new_agbno, - __entry->new_len) + __entry->len) ); /* simple inode-based error/%ip tracepoint class */ diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index b45879868f90..24233e0b57fd 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -955,13 +955,6 @@ __xfs_trans_commit( trace_xfs_trans_commit(tp, _RET_IP_); - error = xfs_trans_run_precommits(tp); - if (error) { - if (tp->t_flags & XFS_TRANS_PERM_LOG_RES) - xfs_defer_cancel(tp); - goto out_unreserve; - } - /* * Finish deferred items on final commit. Only permanent transactions * should ever have deferred ops. @@ -972,13 +965,12 @@ __xfs_trans_commit( error = xfs_defer_finish_noroll(&tp); if (error) goto out_unreserve; - - /* Run precommits from final tx in defer chain. */ - error = xfs_trans_run_precommits(tp); - if (error) - goto out_unreserve; } + error = xfs_trans_run_precommits(tp); + if (error) + goto out_unreserve; + /* * If there is nothing to be logged by the transaction, * then unlock all of the items associated with the diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 2810555d67ac..2d12bf937a26 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -461,7 +461,7 @@ . 
= ALIGN((align)); \ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ __start_rodata = .; \ - *(.rodata) *(.rodata.*) \ + *(.rodata) *(.rodata.*) *(.data.rel.ro*) \ SCHED_DATA \ RO_AFTER_INIT_DATA /* Read only after init */ \ . = ALIGN(8); \ diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h index b3f5d73ae1d6..6668e92b1cc4 100644 --- a/include/clocksource/hyperv_timer.h +++ b/include/clocksource/hyperv_timer.h @@ -34,6 +34,8 @@ extern void hv_init_clocksource(void); extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void); +extern void hv_adj_sched_clock_offset(u64 offset); + static inline notrace u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg, u64 *cur_tsc) { diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 40e855c8407c..642995704b69 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -220,6 +220,13 @@ struct drm_dp_mst_branch { */ struct list_head destroy_next; + /** + * @rad: Relative Address of the MST branch. + * For &drm_dp_mst_topology_mgr.mst_primary, it's rad[8] are all 0, + * unset and unused. For MST branches connected after mst_primary, + * in each element of rad[] the nibbles are ordered by the most + * signifcant 4 bits first and the least significant 4 bits second. + */ u8 rad[8]; u8 lct; int num_ports; @@ -694,6 +701,13 @@ struct drm_dp_mst_topology_mgr { */ bool payload_id_table_cleared : 1; + /** + * @reset_rx_state: The down request's reply and up request message + * receiver state must be reset, after the topology manager got + * removed. Protected by @lock. + */ + bool reset_rx_state : 1; + /** * @payload_count: The number of currently active payloads in hardware. This value is only * intended to be used internally by MST helpers for payload tracking, and is only safe to diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 10b1990bc1f6..36225aedf613 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -372,8 +372,27 @@ struct drm_atomic_state { * * Allow full modeset. This is used by the ATOMIC IOCTL handler to * implement the DRM_MODE_ATOMIC_ALLOW_MODESET flag. Drivers should - * never consult this flag, instead looking at the output of - * drm_atomic_crtc_needs_modeset(). + * generally not consult this flag, but instead look at the output of + * drm_atomic_crtc_needs_modeset(). The detailed rules are: + * + * - Drivers must not consult @allow_modeset in the atomic commit path. + * Use drm_atomic_crtc_needs_modeset() instead. + * + * - Drivers must consult @allow_modeset before adding unrelated struct + * drm_crtc_state to this commit by calling + * drm_atomic_get_crtc_state(). See also the warning in the + * documentation for that function. + * + * - Drivers must never change this flag, it is under the exclusive + * control of userspace. + * + * - Drivers may consult @allow_modeset in the atomic check path, if + * they have the choice between an optimal hardware configuration + * which requires a modeset, and a less optimal configuration which + * can be committed without a modeset. An example would be suboptimal + * scanout FIFO allocation resulting in increased idle power + * consumption. This allows userspace to avoid flickering and delays + * for the normal composition loop at reasonable cost. 
*/ bool allow_modeset : 1; /** diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index b7d3f3843f1e..9cbdf8ce8a05 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -79,6 +79,12 @@ struct ttm_tt { * page_flags = TTM_TT_FLAG_EXTERNAL | * TTM_TT_FLAG_EXTERNAL_MAPPABLE; * + * TTM_TT_FLAG_DECRYPTED: The mapped ttm pages should be marked as + * not encrypted. The framework will try to match what the dma layer + * is doing, but note that it is a little fragile because ttm page + * fault handling abuses the DMA api a bit and dma_map_attrs can't be + * used to assure pgprot always matches. + * * TTM_TT_FLAG_PRIV_POPULATED: TTM internal only. DO NOT USE. This is * set by TTM after ttm_tt_populate() has successfully returned, and is * then unset when TTM calls ttm_tt_unpopulate(). @@ -87,6 +93,7 @@ struct ttm_tt { #define TTM_TT_FLAG_ZERO_ALLOC (1 << 1) #define TTM_TT_FLAG_EXTERNAL (1 << 2) #define TTM_TT_FLAG_EXTERNAL_MAPPABLE (1 << 3) +#define TTM_TT_FLAG_DECRYPTED (1 << 4) #define TTM_TT_FLAG_PRIV_POPULATED (1U << 31) uint32_t page_flags; diff --git a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h index 9f7c5103bc82..39f203256c4f 100644 --- a/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h +++ b/include/dt-bindings/sound/qcom,q6dsp-lpass-ports.h @@ -131,6 +131,14 @@ #define RX_CODEC_DMA_RX_7 126 #define QUINARY_MI2S_RX 127 #define QUINARY_MI2S_TX 128 +#define DISPLAY_PORT_RX_0 DISPLAY_PORT_RX +#define DISPLAY_PORT_RX_1 129 +#define DISPLAY_PORT_RX_2 130 +#define DISPLAY_PORT_RX_3 131 +#define DISPLAY_PORT_RX_4 132 +#define DISPLAY_PORT_RX_5 133 +#define DISPLAY_PORT_RX_6 134 +#define DISPLAY_PORT_RX_7 135 #define LPASS_CLK_ID_PRI_MI2S_IBIT 1 #define LPASS_CLK_ID_PRI_MI2S_EBIT 2 diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index f196c19f8e55..2d1d02eac500 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -224,8 +224,6 @@ u32 arm_smccc_get_version(void); void __init arm_smccc_version_init(u32 version, enum arm_smccc_conduit conduit); -extern u64 smccc_has_sve_hint; - /** * arm_smccc_get_soc_id_version() * @@ -323,15 +321,6 @@ struct arm_smccc_quirk { } state; }; -/** - * __arm_smccc_sve_check() - Set the SVE hint bit when doing SMC calls - * - * Sets the SMCCC hint bit to indicate if there is live state in the SVE - * registers, this modifies x0 in place and should never be called from C - * code. - */ -asmlinkage unsigned long __arm_smccc_sve_check(unsigned long x0); - /** * __arm_smccc_smc() - make SMC calls * @a0-a7: arguments passed in registers 0 to 7 @@ -399,20 +388,6 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #endif -/* nVHE hypervisor doesn't have a current thread so needs separate checks */ -#if defined(CONFIG_ARM64_SVE) && !defined(__KVM_NVHE_HYPERVISOR__) - -#define SMCCC_SVE_CHECK ALTERNATIVE("nop \n", "bl __arm_smccc_sve_check \n", \ - ARM64_SVE) -#define smccc_sve_clobbers "x16", "x30", "cc", - -#else - -#define SMCCC_SVE_CHECK -#define smccc_sve_clobbers - -#endif - #define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x #define __count_args(...) 
\ @@ -480,7 +455,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, #define ___constraints(count) \ : __constraint_read_ ## count \ - : smccc_sve_clobbers "memory" + : "memory" #define __constraints(count) ___constraints(count) /* @@ -495,8 +470,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, register unsigned long r2 asm("r2"); \ register unsigned long r3 asm("r3"); \ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ - asm volatile(SMCCC_SVE_CHECK \ - inst "\n" : \ + asm volatile(inst "\n" : \ "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \ __constraints(__count_args(__VA_ARGS__))); \ if (___res) \ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 439815cc1ab9..c451adbe82f0 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -241,6 +241,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) { #ifdef CONFIG_LOCKDEP WARN_ON_ONCE(debug_locks && + (inode->i_sb->s_iflags & SB_I_CGROUPWB) && (!lockdep_is_held(&inode->i_lock) && !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && !lockdep_is_held(&inode->i_wb->list_lock))); diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 8d51f69f9f5e..af9056d78fad 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h @@ -42,7 +42,9 @@ struct linux_binprm { * Set when errors can no longer be returned to the * original userspace. */ - point_of_no_return:1; + point_of_no_return:1, + /* Set when "comm" must come from the dentry. */ + comm_from_dentry:1; struct file *executable; /* Executable to pass to the interpreter */ struct file *interpreter; struct file *file; diff --git a/include/linux/bio.h b/include/linux/bio.h index 2b9fc5edf49d..00ab98ce1d43 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -11,6 +11,7 @@ #include #define BIO_MAX_VECS 256U +#define BIO_MAX_INLINE_VECS UIO_MAXIOV static inline unsigned int bio_max_segs(unsigned int nr_segs) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f77e8785802e..de013fc37ef8 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1201,7 +1201,7 @@ static inline unsigned int queue_io_min(const struct request_queue *q) return q->limits.io_min; } -static inline int bdev_io_min(struct block_device *bdev) +static inline unsigned int bdev_io_min(struct block_device *bdev) { return queue_io_min(bdev_get_queue(bdev)); } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 6b18b8da025f..e9c1338851e3 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -250,6 +250,7 @@ struct bpf_map { * same prog type, JITed flag and xdp_has_frags flag. */ struct { + const struct btf_type *attach_func_proto; spinlock_t lock; enum bpf_prog_type type; bool jited; @@ -464,6 +465,7 @@ enum bpf_type_flag { */ PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS), + /* MEM can be uninitialized. */ MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS), /* DYNPTR points to memory local to the bpf program. */ @@ -475,6 +477,18 @@ enum bpf_type_flag { /* Size is known at compile time. */ MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS), + /* Memory must be aligned on some architectures, used in combination with + * MEM_FIXED_SIZE. + */ + MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS), + + /* MEM is being written to, often combined with MEM_UNINIT. Non-presence + * of MEM_WRITE means that MEM is only being read. MEM_WRITE without the + * MEM_UNINIT means that memory needs to be initialized since it is also + * read. 
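[Editor's note] To illustrate the MEM_UNINIT/MEM_WRITE semantics described above: a helper that only fills a caller-supplied buffer would describe that argument with the combined flags. foo_fill_buf() is a made-up helper used purely as a sketch; the flag and arg-type names are the ones from this hunk:

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/string.h>

BPF_CALL_2(foo_fill_buf, void *, dst, u32, size)
{
	memset(dst, 0, size);	/* helper fully initializes the buffer */
	return 0;
}

static const struct bpf_func_proto foo_fill_buf_proto = {
	.func		= foo_fill_buf,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	/* expands to MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM */
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
};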
+ */ + MEM_WRITE = BIT(18 + BPF_BASE_TYPE_BITS), + __BPF_TYPE_FLAG_MAX, __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, }; @@ -510,8 +524,6 @@ enum bpf_arg_type { ARG_ANYTHING, /* any (initialized) argument is ok */ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ - ARG_PTR_TO_INT, /* pointer to int */ - ARG_PTR_TO_LONG, /* pointer to long */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */ @@ -534,10 +546,10 @@ enum bpf_arg_type { ARG_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_ALLOC_MEM, ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK, ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID, - /* pointer to memory does not need to be initialized, helper function must fill - * all bytes or clear them in error case. + /* Pointer to memory does not need to be initialized, since helper function + * fills all bytes or clears them in error case. */ - ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM, + ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM, /* Pointer to valid memory of size known at compile time. */ ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM, diff --git a/include/linux/cache.h b/include/linux/cache.h index 5da1bbd96154..9900d20b76c2 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ -98,4 +98,10 @@ struct cacheline_padding { #define CACHELINE_PADDING(name) #endif +#ifdef ARCH_DMA_MINALIGN +#define ARCH_HAS_DMA_MINALIGN +#else +#define ARCH_DMA_MINALIGN __alignof__(unsigned long long) +#endif + #endif /* __LINUX_CACHE_H */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index be8980b02355..6ab81c06afbc 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -71,9 +71,6 @@ enum { /* Cgroup is frozen. */ CGRP_FROZEN, - - /* Control group has to be killed. */ - CGRP_KILL, }; /* cgroup_root->flags */ @@ -424,6 +421,9 @@ struct cgroup { int nr_threaded_children; /* # of live threaded child cgroups */ + /* sequence number for cgroup.kill, serialized by css_set_lock. */ + unsigned int kill_seq; + struct kernfs_node *kn; /* cgroup kernfs entry */ struct cgroup_file procs_file; /* handle for "cgroup.procs" */ struct cgroup_file events_file; /* handle for "cgroup.events" */ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 973a1bfd7ef5..fdfe8e6ff0b2 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -205,18 +205,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, #endif /* __KERNEL__ */ -/* - * Force the compiler to emit 'sym' as a symbol, so that we can reference - * it from inline assembler. Necessary in case 'sym' could be inlined - * otherwise, or eliminated entirely due to lack of references that are - * visible to the compiler. 
- */ -#define ___ADDRESSABLE(sym, __attrs) \ - static void * __used __attrs \ - __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym; -#define __ADDRESSABLE(sym) \ - ___ADDRESSABLE(sym, __section(".discard.addressable")) - /** * offset_to_ptr - convert a relative memory offset to an absolute pointer * @off: the address of the 32-bit offset value @@ -228,6 +216,33 @@ static inline void *offset_to_ptr(const int *off) #endif /* __ASSEMBLY__ */ +#ifdef CONFIG_64BIT +#define ARCH_SEL(a,b) a +#else +#define ARCH_SEL(a,b) b +#endif + +/* + * Force the compiler to emit 'sym' as a symbol, so that we can reference + * it from inline assembler. Necessary in case 'sym' could be inlined + * otherwise, or eliminated entirely due to lack of references that are + * visible to the compiler. + */ +#define ___ADDRESSABLE(sym, __attrs) \ + static void * __used __attrs \ + __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym; + +#define __ADDRESSABLE(sym) \ + ___ADDRESSABLE(sym, __section(".discard.addressable")) + +#define __ADDRESSABLE_ASM(sym) \ + .pushsection .discard.addressable,"aw"; \ + .align ARCH_SEL(8,4); \ + ARCH_SEL(.quad, .long) __stringify(sym); \ + .popsection; + +#define __ADDRESSABLE_ASM_STR(sym) __stringify(__ADDRESSABLE_ASM(sym)) + /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) diff --git a/include/linux/context_tracking_irq.h b/include/linux/context_tracking_irq.h index c50b5670c4a5..197916ee91a4 100644 --- a/include/linux/context_tracking_irq.h +++ b/include/linux/context_tracking_irq.h @@ -10,12 +10,12 @@ void ct_irq_exit_irqson(void); void ct_nmi_enter(void); void ct_nmi_exit(void); #else -static inline void ct_irq_enter(void) { } -static inline void ct_irq_exit(void) { } +static __always_inline void ct_irq_enter(void) { } +static __always_inline void ct_irq_exit(void) { } static inline void ct_irq_enter_irqson(void) { } static inline void ct_irq_exit_irqson(void) { } -static inline void ct_nmi_enter(void) { } -static inline void ct_nmi_exit(void) { } +static __always_inline void ct_nmi_enter(void) { } +static __always_inline void ct_nmi_exit(void) { } #endif #endif diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 08a1d3e7e46d..2b7ec58299ca 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -28,6 +28,7 @@ struct coredump_params { int vma_count; size_t vma_data_size; struct core_vma_metadata *vma_meta; + struct pid *pid; }; /* diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 1554021231f9..56b40efb51a6 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -502,6 +502,10 @@ extern int coresight_enable(struct coresight_device *csdev); extern void coresight_disable(struct coresight_device *csdev); extern int coresight_timeout(struct csdev_access *csa, u32 offset, int position, int value); +typedef void (*coresight_timeout_cb_t) (struct csdev_access *, u32, int, int); +extern int coresight_timeout_action(struct csdev_access *csa, u32 offset, + int position, int value, + coresight_timeout_cb_t cb); extern int coresight_claim_device(struct coresight_device *csdev); extern int coresight_claim_device_unlocked(struct coresight_device *csdev); diff --git a/include/linux/cpu.h b/include/linux/cpu.h index ea89c9777928..ecf745969bf2 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -76,6 +76,8 @@ extern ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t 
cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_indirect_target_selection(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 3759d0a15c7b..d87a0d024693 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -789,8 +789,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy, int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy); int cpufreq_table_index_unsorted(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation); + unsigned int target_freq, unsigned int min, + unsigned int max, unsigned int relation); int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, unsigned int freq); @@ -855,12 +855,12 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, return best; } -/* Works only on sorted freq-tables */ -static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, - unsigned int target_freq, - bool efficiencies) +static inline int find_index_l(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int min, unsigned int max, + bool efficiencies) { - target_freq = clamp_val(target_freq, policy->min, policy->max); + target_freq = clamp_val(target_freq, min, max); if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) return cpufreq_table_find_index_al(policy, target_freq, @@ -870,6 +870,14 @@ static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, efficiencies); } +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy, + unsigned int target_freq, + bool efficiencies) +{ + return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies); +} + /* Find highest freq at or below target in a table in ascending order */ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, unsigned int target_freq, @@ -923,12 +931,12 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, return best; } -/* Works only on sorted freq-tables */ -static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, - unsigned int target_freq, - bool efficiencies) +static inline int find_index_h(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int min, unsigned int max, + bool efficiencies) { - target_freq = clamp_val(target_freq, policy->min, policy->max); + target_freq = clamp_val(target_freq, min, max); if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) return cpufreq_table_find_index_ah(policy, target_freq, @@ -938,6 +946,14 @@ static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, efficiencies); } +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy, + unsigned int target_freq, + bool efficiencies) +{ + return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies); +} + /* Find closest freq to target in a table in ascending order */ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, unsigned int target_freq, @@ -1008,12 +1024,12 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, return best; } -/* Works only on sorted freq-tables */ -static inline int 
cpufreq_table_find_index_c(struct cpufreq_policy *policy, - unsigned int target_freq, - bool efficiencies) +static inline int find_index_c(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int min, unsigned int max, + bool efficiencies) { - target_freq = clamp_val(target_freq, policy->min, policy->max); + target_freq = clamp_val(target_freq, min, max); if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) return cpufreq_table_find_index_ac(policy, target_freq, @@ -1023,7 +1039,17 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, efficiencies); } -static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx) +/* Works only on sorted freq-tables */ +static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy, + unsigned int target_freq, + bool efficiencies) +{ + return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies); +} + +static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, + unsigned int min, unsigned int max, + int idx) { unsigned int freq; @@ -1032,11 +1058,13 @@ static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx) freq = policy->freq_table[idx].frequency; - return freq == clamp_val(freq, policy->min, policy->max); + return freq == clamp_val(freq, min, max); } static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, unsigned int target_freq, + unsigned int min, + unsigned int max, unsigned int relation) { bool efficiencies = policy->efficiencies_available && @@ -1047,29 +1075,26 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy, relation &= ~CPUFREQ_RELATION_E; if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED)) - return cpufreq_table_index_unsorted(policy, target_freq, - relation); + return cpufreq_table_index_unsorted(policy, target_freq, min, + max, relation); retry: switch (relation) { case CPUFREQ_RELATION_L: - idx = cpufreq_table_find_index_l(policy, target_freq, - efficiencies); + idx = find_index_l(policy, target_freq, min, max, efficiencies); break; case CPUFREQ_RELATION_H: - idx = cpufreq_table_find_index_h(policy, target_freq, - efficiencies); + idx = find_index_h(policy, target_freq, min, max, efficiencies); break; case CPUFREQ_RELATION_C: - idx = cpufreq_table_find_index_c(policy, target_freq, - efficiencies); + idx = find_index_c(policy, target_freq, min, max, efficiencies); break; default: WARN_ON_ONCE(1); return 0; } - /* Limit frequency index to honor policy->min/max */ - if (!cpufreq_is_in_limits(policy, idx) && efficiencies) { + /* Limit frequency index to honor min and max */ + if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) { efficiencies = false; goto retry; } diff --git a/include/linux/crypto.h b/include/linux/crypto.h index e3c4be29aacc..d354a2a7ac5f 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -642,6 +642,7 @@ int crypto_has_alg(const char *name, u32 type, u32 mask); */ struct crypto_tfm { + refcount_t refcnt; u32 crt_flags; diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h index 4dc7cda4fd46..7fd704bb8f3d 100644 --- a/include/linux/devfreq.h +++ b/include/linux/devfreq.h @@ -273,8 +273,8 @@ void devm_devfreq_unregister_notifier(struct device *dev, struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node); struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, const char *phandle_name, int index); +#endif /* CONFIG_PM_DEVFREQ */ -#if 
IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) /** * struct devfreq_simple_ondemand_data - ``void *data`` fed to struct devfreq * and devfreq_add_device @@ -292,9 +292,7 @@ struct devfreq_simple_ondemand_data { unsigned int upthreshold; unsigned int downdifferential; }; -#endif -#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE) enum devfreq_parent_dev_type { DEVFREQ_PARENT_DEV, CPUFREQ_PARENT_DEV, @@ -337,9 +335,8 @@ struct devfreq_passive_data { struct notifier_block nb; struct list_head cpu_data_list; }; -#endif -#else /* !CONFIG_PM_DEVFREQ */ +#if !defined(CONFIG_PM_DEVFREQ) static inline struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, const char *governor_name, diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 0ee20b764000..af3f39ecc1b8 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -2,6 +2,7 @@ #ifndef _LINUX_DMA_MAPPING_H #define _LINUX_DMA_MAPPING_H +#include #include #include #include @@ -543,13 +544,15 @@ static inline int dma_set_min_align_mask(struct device *dev, return 0; } +#ifndef dma_get_cache_alignment static inline int dma_get_cache_alignment(void) { -#ifdef ARCH_DMA_MINALIGN +#ifdef ARCH_HAS_DMA_MINALIGN return ARCH_DMA_MINALIGN; #endif return 1; } +#endif static inline void *dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) @@ -595,10 +598,14 @@ static inline int dma_mmap_wc(struct device *dev, #else #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) -#define dma_unmap_addr(PTR, ADDR_NAME) (0) -#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0) -#define dma_unmap_len(PTR, LEN_NAME) (0) -#define dma_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0) +#define dma_unmap_addr(PTR, ADDR_NAME) \ + ({ typeof(PTR) __p __maybe_unused = PTR; 0; }) +#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) \ + do { typeof(PTR) __p __maybe_unused = PTR; } while (0) +#define dma_unmap_len(PTR, LEN_NAME) \ + ({ typeof(PTR) __p __maybe_unused = PTR; 0; }) +#define dma_unmap_len_set(PTR, LEN_NAME, VAL) \ + do { typeof(PTR) __p __maybe_unused = PTR; } while (0) #endif #endif /* _LINUX_DMA_MAPPING_H */ diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h index 6fbfbde68a37..620a3260fc08 100644 --- a/include/linux/dsa/ocelot.h +++ b/include/linux/dsa/ocelot.h @@ -15,6 +15,7 @@ struct ocelot_skb_cb { struct sk_buff *clone; unsigned int ptp_class; /* valid only for clones */ + unsigned long ptp_tx_time; /* valid only for clones */ u32 tstamp_lo; u8 ptp_cmd; u8 ts_id; diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index c860c72a921d..3a485cc0e0fa 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h @@ -11,6 +11,8 @@ Supported chipsets: 93c46, 93c56 and 93c66. */ +#include + /* * EEPROM operation defines. */ @@ -34,6 +36,7 @@ * @register_write(struct eeprom_93cx6 *eeprom): handler to * write to the eeprom register by using all reg_* fields. * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines + * @quirks: eeprom or controller quirks * @drive_data: Set if we're driving the data line. 
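[Editor's note] On the dma_unmap_* helpers above: with !CONFIG_NEED_DMA_MAP_STATE the state members compile away, and the new definitions still reference their pointer argument through a __maybe_unused local, so bookkeeping structures touched only via these macros are still type-checked and do not trigger set-but-unused warnings. A minimal, hypothetical usage pattern:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct foo_tx_buf {
	DEFINE_DMA_UNMAP_ADDR(addr);
	DEFINE_DMA_UNMAP_LEN(len);
};

static int foo_tx_map(struct device *dev, struct foo_tx_buf *buf,
		      void *data, size_t size)
{
	dma_addr_t dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	dma_unmap_addr_set(buf, addr, dma);
	dma_unmap_len_set(buf, len, size);
	return 0;
}

static void foo_tx_unmap(struct device *dev, struct foo_tx_buf *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, addr),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
}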
* @reg_data_in: register field to indicate data input * @reg_data_out: register field to indicate data output @@ -50,6 +53,9 @@ struct eeprom_93cx6 { void (*register_write)(struct eeprom_93cx6 *eeprom); int width; + unsigned int quirks; +/* Some EEPROMs require an extra clock cycle before reading */ +#define PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE BIT(0) char drive_data; char reg_data_in; @@ -71,3 +77,8 @@ extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data); + +static inline bool has_quirk_extra_read_cycle(struct eeprom_93cx6 *eeprom) +{ + return eeprom->quirks & PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE; +} diff --git a/include/linux/efi.h b/include/linux/efi.h index a849b533be5b..1d73c77051c0 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -125,6 +125,7 @@ typedef struct { #define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */ #define EFI_MEMORY_SP ((u64)0x0000000000040000ULL) /* soft reserved */ #define EFI_MEMORY_CPU_CRYPTO ((u64)0x0000000000080000ULL) /* supports encryption */ +#define EFI_MEMORY_HOT_PLUGGABLE BIT_ULL(20) /* supports unplugging at runtime */ #define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */ #define EFI_MEMORY_DESCRIPTOR_VERSION 1 diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 3337745d81bd..0c0d00fcd131 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -42,7 +42,7 @@ static inline void eventpoll_release(struct file *file) * because the file in on the way to be removed and nobody ( but * eventpoll ) has still a reference to this file. */ - if (likely(!file->f_ep)) + if (likely(!READ_ONCE(file->f_ep))) return; /* diff --git a/include/linux/filter.h b/include/linux/filter.h index 01f97956572c..f3ef1a8965bb 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -775,7 +775,14 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, * under local_bh_disable(), which provides the needed RCU protection * for accessing map entries. 
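[Editor's note] A sketch of the calling convention described above: drivers run XDP from their NAPI receive path, where softirq context already provides the local_bh_disable() protection. foo_run_xdp() is illustrative only:

#include <linux/filter.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

static u32 foo_run_xdp(struct net_device *dev, struct bpf_prog *prog,
		       struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_REDIRECT:
	case XDP_DROP:
		return act;
	default:
		bpf_warn_invalid_xdp_action(dev, prog, act);
		return XDP_DROP;	/* treat unknown verdicts as drop */
	}
}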
*/ - u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); + struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + u32 act; + + if (ri->map_id || ri->map_type) { + ri->map_id = 0; + ri->map_type = BPF_MAP_TYPE_UNSPEC; + } + act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) { if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev)) diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h index 223da48a6d18..94c4edd047e5 100644 --- a/include/linux/fpga/fpga-bridge.h +++ b/include/linux/fpga/fpga-bridge.h @@ -45,6 +45,7 @@ struct fpga_bridge_info { * @dev: FPGA bridge device * @mutex: enforces exclusive reference to bridge * @br_ops: pointer to struct of FPGA bridge ops + * @br_ops_owner: module containing the br_ops * @info: fpga image specific information * @node: FPGA bridge list node * @priv: low level driver private date @@ -54,6 +55,7 @@ struct fpga_bridge { struct device dev; struct mutex mutex; /* for exclusive reference to bridge */ const struct fpga_bridge_ops *br_ops; + struct module *br_ops_owner; struct fpga_image_info *info; struct list_head node; void *priv; @@ -79,10 +81,12 @@ int of_fpga_bridge_get_to_list(struct device_node *np, struct fpga_image_info *info, struct list_head *bridge_list); +#define fpga_bridge_register(parent, name, br_ops, priv) \ + __fpga_bridge_register(parent, name, br_ops, priv, THIS_MODULE) struct fpga_bridge * -fpga_bridge_register(struct device *parent, const char *name, - const struct fpga_bridge_ops *br_ops, - void *priv); +__fpga_bridge_register(struct device *parent, const char *name, + const struct fpga_bridge_ops *br_ops, void *priv, + struct module *owner); void fpga_bridge_unregister(struct fpga_bridge *br); #endif /* _LINUX_FPGA_BRIDGE_H */ diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 54f63459efd6..0d4fe068f3d8 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -201,6 +201,7 @@ struct fpga_manager_ops { * @state: state of fpga manager * @compat_id: FPGA manager id for compatibility check. 
* @mops: pointer to struct of fpga manager ops + * @mops_owner: module containing the mops * @priv: low level driver private date */ struct fpga_manager { @@ -210,6 +211,7 @@ struct fpga_manager { enum fpga_mgr_states state; struct fpga_compat_id *compat_id; const struct fpga_manager_ops *mops; + struct module *mops_owner; void *priv; }; @@ -230,18 +232,30 @@ struct fpga_manager *fpga_mgr_get(struct device *dev); void fpga_mgr_put(struct fpga_manager *mgr); +#define fpga_mgr_register_full(parent, info) \ + __fpga_mgr_register_full(parent, info, THIS_MODULE) struct fpga_manager * -fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info); +__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner); +#define fpga_mgr_register(parent, name, mops, priv) \ + __fpga_mgr_register(parent, name, mops, priv, THIS_MODULE) struct fpga_manager * -fpga_mgr_register(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, void *priv); +__fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, struct module *owner); + void fpga_mgr_unregister(struct fpga_manager *mgr); +#define devm_fpga_mgr_register_full(parent, info) \ + __devm_fpga_mgr_register_full(parent, info, THIS_MODULE) struct fpga_manager * -devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info); +__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner); +#define devm_fpga_mgr_register(parent, name, mops, priv) \ + __devm_fpga_mgr_register(parent, name, mops, priv, THIS_MODULE) struct fpga_manager * -devm_fpga_mgr_register(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, void *priv); +__devm_fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, + struct module *owner); #endif /*_LINUX_FPGA_MGR_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 0d32634c5cf0..08fba309ddc7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3385,6 +3385,8 @@ extern const struct file_operations generic_ro_fops; extern int readlink_copy(char __user *, int, const char *); extern int page_readlink(struct dentry *, char __user *, int); +extern const char *page_get_link_raw(struct dentry *, struct inode *, + struct delayed_call *); extern const char *page_get_link(struct dentry *, struct inode *, struct delayed_call *); extern void page_put_link(void *); diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 5700451b300f..7efb4493e51c 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h @@ -51,8 +51,10 @@ struct fwnode_handle { * fwnode link flags * * CYCLE: The fwnode link is part of a cycle. Don't defer probe. + * IGNORE: Completely ignore this link, even during cycle detection. 
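[Editor's note] With the __fpga_mgr_register()/__fpga_bridge_register() wrappers above, existing callers keep the same call; the macro now records THIS_MODULE as the ops owner on their behalf. A hypothetical probe, for illustration (foo_* names are not from this patch):

#include <linux/err.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/platform_device.h>

static const struct fpga_manager_ops foo_fpga_mgr_ops = {
	/* .write_init / .write / .write_complete elided for brevity */
};

static int foo_fpga_probe(struct platform_device *pdev)
{
	struct fpga_manager *mgr;

	mgr = devm_fpga_mgr_register(&pdev->dev, "foo FPGA manager",
				     &foo_fpga_mgr_ops, NULL);
	return PTR_ERR_OR_ZERO(mgr);
}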
*/ #define FWLINK_FLAG_CYCLE BIT(0) +#define FWLINK_FLAG_IGNORE BIT(1) struct fwnode_link { struct fwnode_handle *supplier; @@ -81,7 +83,7 @@ struct fwnode_endpoint { #define SWNODE_GRAPH_PORT_NAME_FMT "port@%u" #define SWNODE_GRAPH_ENDPOINT_NAME_FMT "endpoint@%u" -#define NR_FWNODE_REFERENCE_ARGS 8 +#define NR_FWNODE_REFERENCE_ARGS 16 /** * struct fwnode_reference_args - Fwnode reference with additional arguments diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index b566ae420449..50b6f30adf54 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -226,6 +226,12 @@ struct hisi_qm_status { struct hisi_qm; +enum acc_err_result { + ACC_ERR_NONE, + ACC_ERR_NEED_RESET, + ACC_ERR_RECOVERED, +}; + struct hisi_qm_err_info { char *acpi_rst; u32 msi_wr_port; @@ -254,9 +260,9 @@ struct hisi_qm_err_ini { void (*close_axi_master_ooo)(struct hisi_qm *qm); void (*open_sva_prefetch)(struct hisi_qm *qm); void (*close_sva_prefetch)(struct hisi_qm *qm); - void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); void (*show_last_dfx_regs)(struct hisi_qm *qm); void (*err_info_init)(struct hisi_qm *qm); + enum acc_err_result (*get_err_result)(struct hisi_qm *qm); }; struct hisi_qm_cap_info { diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 254d4a898179..05f8b7d7d1e9 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -237,6 +237,7 @@ struct hrtimer_cpu_base { ktime_t softirq_expires_next; struct hrtimer *softirq_next_timer; struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; + call_single_data_t csd; } ____cacheline_aligned; static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) @@ -532,6 +533,7 @@ extern void __init hrtimers_init(void); extern void sysrq_timer_list_show(void); int hrtimers_prepare_cpu(unsigned int cpu); +int hrtimers_cpu_starting(unsigned int cpu); #ifdef CONFIG_HOTPLUG_CPU int hrtimers_cpu_dying(unsigned int cpu); #else diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 811d59cf891b..80d876593caa 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -1224,13 +1224,6 @@ extern int vmbus_sendpacket(struct vmbus_channel *channel, enum vmbus_packet_type type, u32 flags); -extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel, - struct hv_page_buffer pagebuffers[], - u32 pagecount, - void *buffer, - u32 bufferlen, - u64 requestid); - extern int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, struct vmbus_packet_mpb_array *mpb, u32 desc_size, @@ -1567,6 +1560,7 @@ struct hv_util_service { void *channel; void (*util_cb)(void *); int (*util_init)(struct hv_util_service *); + int (*util_init_transport)(void); void (*util_deinit)(void); int (*util_pre_suspend)(void); int (*util_pre_resume)(void); diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index 604a126b78c8..2f731c6c16ea 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -268,6 +268,20 @@ enum i3c_bus_mode { I3C_BUS_MODE_MIXED_SLOW, }; +/** + * enum i3c_open_drain_speed - I3C open-drain speed + * @I3C_OPEN_DRAIN_SLOW_SPEED: Slow open-drain speed for sending the first + * broadcast address. The first broadcast address at this speed + * will be visible to all devices on the I3C bus. I3C devices + * working in I2C mode will turn off their spike filter when + * switching into I3C mode. + * @I3C_OPEN_DRAIN_NORMAL_SPEED: Normal open-drain speed in I3C bus mode. 
+ */ +enum i3c_open_drain_speed { + I3C_OPEN_DRAIN_SLOW_SPEED, + I3C_OPEN_DRAIN_NORMAL_SPEED, +}; + /** * enum i3c_addr_slot_status - I3C address slot status * @I3C_ADDR_SLOT_FREE: address is free @@ -275,7 +289,8 @@ enum i3c_bus_mode { * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask - * + * @I3C_ADDR_SLOT_EXT_DESIRED: the bitmask represents addresses that are preferred by some devices, + * such as the "assigned-address" property in a device tree source. * On an I3C bus, addresses are assigned dynamically, and we need to know which * addresses are free to use and which ones are already assigned. * @@ -288,8 +303,12 @@ enum i3c_addr_slot_status { I3C_ADDR_SLOT_I2C_DEV, I3C_ADDR_SLOT_I3C_DEV, I3C_ADDR_SLOT_STATUS_MASK = 3, + I3C_ADDR_SLOT_EXT_STATUS_MASK = 7, + I3C_ADDR_SLOT_EXT_DESIRED = BIT(2), }; +#define I3C_ADDR_SLOT_STATUS_BITS 4 + /** * struct i3c_bus - I3C bus object * @cur_master: I3C master currently driving the bus. Since I3C is multi-master @@ -331,7 +350,7 @@ enum i3c_addr_slot_status { struct i3c_bus { struct i3c_dev_desc *cur_master; int id; - unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG]; + unsigned long addrslots[((I2C_MAX_ADDR + 1) * I3C_ADDR_SLOT_STATUS_BITS) / BITS_PER_LONG]; enum i3c_bus_mode mode; struct { unsigned long i3c; @@ -425,6 +444,9 @@ struct i3c_bus { * for a future IBI * This method is mandatory only if ->request_ibi is not * NULL. + * @enable_hotjoin: enable hot join event detect. + * @disable_hotjoin: disable hot join event detect. + * @set_speed: adjust I3C open drain mode timing. */ struct i3c_master_controller_ops { int (*bus_init)(struct i3c_master_controller *master); @@ -451,6 +473,9 @@ struct i3c_master_controller_ops { int (*disable_ibi)(struct i3c_dev_desc *dev); void (*recycle_ibi_slot)(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot); + int (*enable_hotjoin)(struct i3c_master_controller *master); + int (*disable_hotjoin)(struct i3c_master_controller *master); + int (*set_speed)(struct i3c_master_controller *master, enum i3c_open_drain_speed speed); }; /** @@ -464,6 +489,7 @@ struct i3c_master_controller_ops { * @ops: master operations. 
See &struct i3c_master_controller_ops * @secondary: true if the master is a secondary master * @init_done: true when the bus initialization is done + * @hotjoin: true if the master support hotjoin * @boardinfo.i3c: list of I3C boardinfo objects * @boardinfo.i2c: list of I2C boardinfo objects * @boardinfo: board-level information attached to devices connected on the bus @@ -486,6 +512,7 @@ struct i3c_master_controller { const struct i3c_master_controller_ops *ops; unsigned int secondary : 1; unsigned int init_done : 1; + unsigned int hotjoin: 1; struct { struct list_head i3c; struct list_head i2c; @@ -541,7 +568,9 @@ int i3c_master_register(struct i3c_master_controller *master, struct device *parent, const struct i3c_master_controller_ops *ops, bool secondary); -int i3c_master_unregister(struct i3c_master_controller *master); +void i3c_master_unregister(struct i3c_master_controller *master); +int i3c_master_enable_hotjoin(struct i3c_master_controller *master); +int i3c_master_disable_hotjoin(struct i3c_master_controller *master); /** * i3c_dev_get_master_data() - get master private data attached to an I3C diff --git a/include/linux/i8253.h b/include/linux/i8253.h index 8336b2f6f834..56c280eb2d4f 100644 --- a/include/linux/i8253.h +++ b/include/linux/i8253.h @@ -21,9 +21,9 @@ #define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) extern raw_spinlock_t i8253_lock; -extern bool i8253_clear_counter_on_shutdown; extern struct clock_event_device i8253_clockevent; extern void clockevent_i8253_init(bool oneshot); +extern void clockevent_i8253_disable(void); extern void setup_pit_timer(void); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 160230bb1a9c..8e00918b15b4 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -4571,28 +4571,24 @@ static inline u8 ieee80211_mle_common_size(const u8 *data) { const struct ieee80211_multi_link_elem *mle = (const void *)data; u16 control = le16_to_cpu(mle->control); - u8 common = 0; switch (u16_get_bits(control, IEEE80211_ML_CONTROL_TYPE)) { case IEEE80211_ML_CONTROL_TYPE_BASIC: case IEEE80211_ML_CONTROL_TYPE_PREQ: case IEEE80211_ML_CONTROL_TYPE_TDLS: case IEEE80211_ML_CONTROL_TYPE_RECONF: + case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS: /* * The length is the first octet pointed by mle->variable so no * need to add anything */ break; - case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS: - if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR) - common += ETH_ALEN; - return common; default: WARN_ON(1); return 0; } - return sizeof(*mle) + common + mle->variable[0]; + return sizeof(*mle) + mle->variable[0]; } /** @@ -4645,8 +4641,7 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, u8 len) check_common_len = true; break; case IEEE80211_ML_CONTROL_TYPE_PRIO_ACCESS: - if (control & IEEE80211_MLC_PRIO_ACCESS_PRES_AP_MLD_MAC_ADDR) - common += ETH_ALEN; + common = ETH_ALEN + 1; break; default: /* we don't know this type */ diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 83266201746c..9f7dbbb34094 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -587,13 +587,16 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) * vlan_get_protocol - get protocol EtherType. * @skb: skbuff to query * @type: first vlan protocol + * @mac_offset: MAC offset * @depth: buffer to store length of eth and vlan tags in bytes * * Returns the EtherType of the packet, regardless of whether it is * vlan encapsulated (normal or hardware accelerated) or not. 
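[Editor's note] A short usage sketch of the wrapper documented above, which resolves the real EtherType of a possibly VLAN-tagged frame and reports the combined Ethernet + VLAN header length; foo_is_ipv4() is illustrative:

#include <linux/if_ether.h>
#include <linux/if_vlan.h>

static bool foo_is_ipv4(const struct sk_buff *skb)
{
	int depth = 0;	/* filled with ETH + VLAN header length in bytes */
	__be16 proto = __vlan_get_protocol(skb, skb->protocol, &depth);

	return proto == htons(ETH_P_IP);
}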
*/ -static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, - int *depth) +static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb, + __be16 type, + int mac_offset, + int *depth) { unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH; @@ -612,7 +615,8 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, do { struct vlan_hdr vhdr, *vh; - vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr); + vh = skb_header_pointer(skb, mac_offset + vlan_depth, + sizeof(vhdr), &vhdr); if (unlikely(!vh || !--parse_depth)) return 0; @@ -627,6 +631,12 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, return type; } +static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type, + int *depth) +{ + return __vlan_get_protocol_offset(skb, type, 0, depth); +} + /** * vlan_get_protocol - get protocol EtherType. * @skb: skbuff to query diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 4a1dc88ddbff..2610a7d156da 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -441,7 +441,7 @@ irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, static inline void disable_irq_nosync_lockdep(unsigned int irq) { disable_irq_nosync(irq); -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_disable(); #endif } @@ -449,7 +449,7 @@ static inline void disable_irq_nosync_lockdep(unsigned int irq) static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags) { disable_irq_nosync(irq); -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_save(*flags); #endif } @@ -464,7 +464,7 @@ static inline void disable_irq_lockdep(unsigned int irq) static inline void enable_irq_lockdep(unsigned int irq) { -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_enable(); #endif enable_irq(irq); @@ -472,7 +472,7 @@ static inline void enable_irq_lockdep(unsigned int irq) static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags) { -#ifdef CONFIG_LOCKDEP +#if defined(CONFIG_LOCKDEP) && !defined(CONFIG_PREEMPT_RT) local_irq_restore(*flags); #endif enable_irq(irq); diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index a8f3058448ea..e0dec1bf917a 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -50,10 +50,8 @@ bool io_is_uring_fops(struct file *file); static inline void io_uring_files_cancel(void) { - if (current->io_uring) { - io_uring_unreg_ringfd(); + if (current->io_uring) __io_uring_cancel(false); - } } static inline void io_uring_task_cancel(void) { diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 37aeea266ebb..2bd01109e0ec 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -352,6 +352,11 @@ struct io_ring_ctx { unsigned sq_thread_idle; /* protected by ->completion_lock */ unsigned evfd_last_cq_tail; + + unsigned short n_ring_pages; + unsigned short n_sqe_pages; + struct page **ring_pages; + struct page **sqe_pages; }; enum { diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 9d87090953bc..2bfa9611be67 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -999,7 +999,7 @@ iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) static inline struct iommu_sva * iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, 
void *drvdata) { - return NULL; + return ERR_PTR(-ENODEV); } static inline void iommu_sva_unbind_device(struct iommu_sva *handle) diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 9a44de45cc1f..9f27e004127b 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -199,6 +199,7 @@ struct inet6_cork { struct ipv6_txoptions *opt; u8 hop_limit; u8 tclass; + u8 dontfrag:1; }; /** diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 5e13f801c902..3778e26f7b14 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -349,7 +349,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m) * - all other values are converted to jiffies by either multiplying * the input value by a factor or dividing it with a factor and * handling any 32-bit overflows. - * for the details see __msecs_to_jiffies() + * for the details see _msecs_to_jiffies() * * msecs_to_jiffies() checks for the passed in value being a constant * via __builtin_constant_p() allowing gcc to eliminate most of the diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 0cd33be7142a..6d10bd2b9e0f 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h @@ -57,10 +57,10 @@ static inline void *dereference_symbol_descriptor(void *ptr) preempt_disable(); mod = __module_address((unsigned long)ptr); - preempt_enable(); if (mod) ptr = dereference_module_function_descriptor(mod, ptr); + preempt_enable(); #endif return ptr; } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 637a60607c7d..66aaf8915277 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -879,6 +879,15 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) { int num_vcpus = atomic_read(&kvm->online_vcpus); + + /* + * Explicitly verify the target vCPU is online, as the anti-speculation + * logic only limits the CPU's ability to speculate, e.g. given a "bad" + * index, clamping the index to 0 would return vCPU0, not NULL. + */ + if (i >= num_vcpus) + return NULL; + i = array_index_nospec(i, num_vcpus); /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. 
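[Editor's note] With the explicit bounds check above, callers that resolve a user-controlled index must be prepared for a NULL return. A minimal hypothetical caller:

#include <linux/errno.h>
#include <linux/kvm_host.h>

static int foo_kick_vcpu(struct kvm *kvm, unsigned int idx)
{
	struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, idx);

	if (!vcpu)
		return -EINVAL;

	kvm_vcpu_kick(vcpu);
	return 0;
}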
*/ diff --git a/include/linux/leds.h b/include/linux/leds.h index 79ab2dfd3c72..01fccb1c5001 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -161,7 +161,7 @@ struct led_classdev { struct kernfs_node *brightness_hw_changed_kn; #endif - /* Ensures consistent access to the LED Flash Class device */ + /* Ensures consistent access to the LED class device */ struct mutex led_access; }; diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 1f1099dac3f0..43d8734ac0eb 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -229,7 +229,7 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name, (lock)->dep_map.lock_type) #define lockdep_set_subclass(lock, sub) \ - lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ + lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\ (lock)->dep_map.wait_type_inner, \ (lock)->dep_map.wait_type_outer, \ (lock)->dep_map.lock_type) diff --git a/include/linux/lzo.h b/include/linux/lzo.h index e95c7d1092b2..4d30e3624acd 100644 --- a/include/linux/lzo.h +++ b/include/linux/lzo.h @@ -24,10 +24,18 @@ int lzo1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem); +/* Same as above but does not write more than dst_len to dst. */ +int lzo1x_1_compress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ int lzorle1x_1_compress(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len, void *wrkmem); +/* Same as above but does not write more than dst_len to dst. */ +int lzorle1x_1_compress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + /* safe decompression with overrun testing */ int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, unsigned char *dst, size_t *dst_len); diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h index fecc2fa2a364..aad9c6b50463 100644 --- a/include/linux/mfd/syscon.h +++ b/include/linux/mfd/syscon.h @@ -17,20 +17,19 @@ struct device_node; #ifdef CONFIG_MFD_SYSCON -extern struct regmap *device_node_to_regmap(struct device_node *np); -extern struct regmap *syscon_node_to_regmap(struct device_node *np); -extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s); -extern struct regmap *syscon_regmap_lookup_by_phandle( - struct device_node *np, - const char *property); -extern struct regmap *syscon_regmap_lookup_by_phandle_args( - struct device_node *np, - const char *property, - int arg_count, - unsigned int *out_args); -extern struct regmap *syscon_regmap_lookup_by_phandle_optional( - struct device_node *np, - const char *property); +struct regmap *device_node_to_regmap(struct device_node *np); +struct regmap *syscon_node_to_regmap(struct device_node *np); +struct regmap *syscon_regmap_lookup_by_compatible(const char *s); +struct regmap *syscon_regmap_lookup_by_phandle(struct device_node *np, + const char *property); +struct regmap *syscon_regmap_lookup_by_phandle_args(struct device_node *np, + const char *property, + int arg_count, + unsigned int *out_args); +struct regmap *syscon_regmap_lookup_by_phandle_optional(struct device_node *np, + const char *property); +int of_syscon_register_regmap(struct device_node *np, + struct regmap *regmap); #else static inline struct regmap *device_node_to_regmap(struct device_node *np) { @@ -70,6 +69,12 @@ static 
inline struct regmap *syscon_regmap_lookup_by_phandle_optional( return NULL; } +static inline int of_syscon_register_regmap(struct device_node *np, + struct regmap *regmap) +{ + return -EOPNOTSUPP; +} + #endif #endif /* __LINUX_MFD_SYSCON_H__ */ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 6646634a0b9d..0cb296f0f8d1 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -1115,7 +1115,7 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, struct mlx4_buf *buf); -int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, unsigned int order); void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1cae12185cf0..3c3e0f26c244 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -716,7 +716,6 @@ struct mlx5_timer { struct timecounter tc; u32 nominal_c_mult; unsigned long overflow_period; - struct delayed_work overflow_work; }; struct mlx5_clock { @@ -1212,6 +1211,12 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev) return dev->coredev_type == MLX5_COREDEV_VF; } +static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1, + const struct mlx5_core_dev *dev2) +{ + return dev1->coredev_type == dev2->coredev_type; +} + static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev) { return dev->caps.embedded_cpu; diff --git a/include/linux/mm.h b/include/linux/mm.h index 971186f0b7b0..03357c196e0b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1610,6 +1610,28 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma, return page_maybe_dma_pinned(page); } +/** + * is_zero_page - Query if a page is a zero page + * @page: The page to query + * + * This returns true if @page is one of the permanent zero pages. + */ +static inline bool is_zero_page(const struct page *page) +{ + return is_zero_pfn(page_to_pfn(page)); +} + +/** + * is_zero_folio - Query if a folio is a zero page + * @folio: The folio to query + * + * This returns true if @folio is one of the permanent zero pages. + */ +static inline bool is_zero_folio(const struct folio *folio) +{ + return is_zero_page(&folio->page); +} + /* MIGRATE_CMA and ZONE_MOVABLE do not allow pin pages */ #ifdef CONFIG_MIGRATION static inline bool is_longterm_pinnable_page(struct page *page) @@ -1620,8 +1642,8 @@ static inline bool is_longterm_pinnable_page(struct page *page) if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE) return false; #endif - /* The zero page may always be pinned */ - if (is_zero_pfn(page_to_pfn(page))) + /* The zero page can be "pinned" but gets special handling. */ + if (is_zero_page(page)) return true; /* Coherent device memory must always allow eviction. 
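[Editor's note] A usage sketch for the new is_zero_folio()/is_zero_page() helpers above (foo_* names hypothetical): an acquire/release pair that skips reference counting for the permanent zero folio instead of open-coding is_zero_pfn():

#include <linux/mm.h>

static void foo_hold_folio(struct folio *folio)
{
	if (!is_zero_folio(folio))
		folio_get(folio);
}

static void foo_drop_folio(struct folio *folio)
{
	if (!is_zero_folio(folio))
		folio_put(folio);
}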
*/ diff --git a/include/linux/mman.h b/include/linux/mman.h index 58b3abd457a3..21ea08b919d9 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -2,6 +2,7 @@ #ifndef _LINUX_MMAN_H #define _LINUX_MMAN_H +#include #include #include @@ -90,7 +91,7 @@ static inline void vm_unacct_memory(long pages) #endif #ifndef arch_calc_vm_flag_bits -#define arch_calc_vm_flag_bits(flags) 0 +#define arch_calc_vm_flag_bits(file, flags) 0 #endif #ifndef arch_validate_prot @@ -147,12 +148,12 @@ calc_vm_prot_bits(unsigned long prot, unsigned long pkey) * Combine the mmap "flags" argument into "vm_flags" used internally. */ static inline unsigned long -calc_vm_flag_bits(unsigned long flags) +calc_vm_flag_bits(struct file *file, unsigned long flags) { return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | - arch_calc_vm_flag_bits(flags); + arch_calc_vm_flag_bits(file, flags); } unsigned long vm_commit_limit(void); diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 7b12eebc5586..afa575e362a4 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -296,6 +296,7 @@ struct mmc_card { #define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */ #define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */ #define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */ +#define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */ bool written_flag; /* Indicates eMMC has been written since power on */ bool reenable_cmdq; /* Re-enable Command Queue */ diff --git a/include/linux/module.h b/include/linux/module.h index 35876e89eb93..41c4c472ed17 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -536,6 +536,11 @@ struct module { atomic_t refcnt; #endif +#ifdef CONFIG_MITIGATION_ITS + int its_num_pages; + void **its_page_array; +#endif + #ifdef CONFIG_CONSTRUCTORS /* Constructor functions. */ ctor_fn_t *ctors; diff --git a/include/linux/msi.h b/include/linux/msi.h index e5dfb9cf3aa1..1bf8d126f792 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -129,6 +129,10 @@ struct pci_msi_desc { * @dev: Pointer to the device which uses this descriptor * @msg: The last set MSI message cached for reuse * @affinity: Optional pointer to a cpu affinity mask for this descriptor + * @iommu_msi_iova: Optional shifted IOVA from the IOMMU to override the msi_addr. + * Only used if iommu_msi_shift != 0 + * @iommu_msi_shift: Indicates how many bits of the original address should be + * preserved when using iommu_msi_iova. 
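[Editor's note] Illustrative only, and assuming CONFIG_IRQ_MSI_IOMMU: composing a message address from the two fields documented above keeps the low iommu_msi_shift bits of the original doorbell. foo_compose_msi_addr() is a sketch, not a kernel API:

#include <linux/msi.h>

static u64 foo_compose_msi_addr(const struct msi_desc *desc, u64 doorbell)
{
	u64 low_mask = (1ULL << desc->iommu_msi_shift) - 1;

	if (!desc->iommu_msi_shift)
		return doorbell;	/* no IOVA override recorded */

	return (desc->iommu_msi_iova << desc->iommu_msi_shift) |
	       (doorbell & low_mask);
}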
* @sysfs_attr: Pointer to sysfs device attribute * * @write_msi_msg: Callback that may be called when the MSI message @@ -146,7 +150,8 @@ struct msi_desc { struct msi_msg msg; struct irq_affinity_desc *affinity; #ifdef CONFIG_IRQ_MSI_IOMMU - const void *iommu_cookie; + u64 iommu_msi_iova : 58; + u64 iommu_msi_shift : 6; #endif #ifdef CONFIG_SYSFS struct device_attribute *sysfs_attrs; @@ -214,28 +219,14 @@ struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter); #define msi_desc_to_dev(desc) ((desc)->dev) +static inline void msi_desc_set_iommu_msi_iova(struct msi_desc *desc, u64 msi_iova, + unsigned int msi_shift) +{ #ifdef CONFIG_IRQ_MSI_IOMMU -static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) -{ - return desc->iommu_cookie; -} - -static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, - const void *iommu_cookie) -{ - desc->iommu_cookie = iommu_cookie; -} -#else -static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc) -{ - return NULL; -} - -static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc, - const void *iommu_cookie) -{ -} + desc->iommu_msi_iova = msi_iova >> msi_shift; + desc->iommu_msi_shift = msi_shift; #endif +} #ifdef CONFIG_PCI_MSI struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index fbbd0df1106b..f44701b82ea8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1747,6 +1747,13 @@ enum netdev_ml_priv_type { ML_PRIV_CAN, }; +enum netdev_stat_type { + NETDEV_PCPU_STAT_NONE, + NETDEV_PCPU_STAT_LSTATS, /* struct pcpu_lstats */ + NETDEV_PCPU_STAT_TSTATS, /* struct pcpu_sw_netstats */ + NETDEV_PCPU_STAT_DSTATS, /* struct pcpu_dstats */ +}; + /** * struct net_device - The DEVICE structure. * @@ -1941,10 +1948,14 @@ enum netdev_ml_priv_type { * * @ml_priv: Mid-layer private * @ml_priv_type: Mid-layer private type - * @lstats: Loopback statistics - * @tstats: Tunnel statistics - * @dstats: Dummy statistics - * @vstats: Virtual ethernet statistics + * + * @pcpu_stat_type: Type of device statistics which the core should + * allocate/free: none, lstats, tstats, dstats. none + * means the driver is handling statistics allocation/ + * freeing internally. 
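[Editor's note] A sketch of how @pcpu_stat_type is meant to be used (foo_* names hypothetical): a virtual driver declares which per-CPU stats the core should allocate and free, then only updates them from its datapath:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_sw_netstats_tx_add(dev, 1, skb->len);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void foo_setup(struct net_device *dev)
{
	/* core allocates/frees dev->tstats; the driver never touches that */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}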
+ * @lstats: Loopback statistics: packets, bytes + * @tstats: Tunnel statistics: RX/TX packets, RX/TX bytes + * @dstats: Dummy statistics: RX/TX/drop packets, RX/TX bytes * * @garp_port: GARP * @mrp_port: MRP @@ -2166,7 +2177,7 @@ struct net_device { void *atalk_ptr; #endif #if IS_ENABLED(CONFIG_AX25) - void *ax25_ptr; + struct ax25_dev __rcu *ax25_ptr; #endif #if IS_ENABLED(CONFIG_CFG80211) struct wireless_dev *ieee80211_ptr; @@ -2287,6 +2298,7 @@ struct net_device { void *ml_priv; enum netdev_ml_priv_type ml_priv_type; + enum netdev_stat_type pcpu_stat_type:8; union { struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; @@ -2526,6 +2538,12 @@ struct net *dev_net(const struct net_device *dev) return read_pnet(&dev->nd_net); } +static inline +struct net *dev_net_rcu(const struct net_device *dev) +{ + return read_pnet_rcu(&dev->nd_net); +} + static inline void dev_net_set(struct net_device *dev, struct net *net) { @@ -2670,6 +2688,16 @@ struct pcpu_sw_netstats { struct u64_stats_sync syncp; } __aligned(4 * sizeof(u64)); +struct pcpu_dstats { + u64 rx_packets; + u64 rx_bytes; + u64 rx_drops; + u64 tx_packets; + u64 tx_bytes; + u64 tx_drops; + struct u64_stats_sync syncp; +} __aligned(8 * sizeof(u64)); + struct pcpu_lstats { u64_stats_t packets; u64_stats_t bytes; @@ -2983,6 +3011,8 @@ static inline struct net_device *first_net_device_rcu(struct net *net) } int netdev_boot_setup_check(struct net_device *dev); +struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, + const char *hwaddr); struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, const char *hwaddr); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index bd19c4b91e31..3ddf205b7e2c 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -71,7 +71,7 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) { struct net_device *dev = napi->dev; - if (dev && dev->npinfo) { + if (dev && rcu_access_pointer(dev->npinfo)) { int owner = smp_processor_id(); while (cmpxchg(&napi->poll_owner, -1, owner) != -1) diff --git a/include/linux/nfs.h b/include/linux/nfs.h index b06375e88e58..095a95c1fae8 100644 --- a/include/linux/nfs.h +++ b/include/linux/nfs.h @@ -10,6 +10,7 @@ #include #include +#include #include /* @@ -44,4 +45,16 @@ enum nfs3_stable_how { /* used by direct.c to mark verf as invalid */ NFS_INVALID_STABLE_HOW = -1 }; + +/** + * nfs_fhandle_hash - calculate the crc32 hash for the filehandle + * @fh - pointer to filehandle + * + * returns a crc32 hash for the filehandle that is compatible with + * the one displayed by "wireshark". 
+ */ +static inline u32 nfs_fhandle_hash(const struct nfs_fh *fh) +{ + return ~crc32_le(0xFFFFFFFF, &fh->data[0], fh->size); +} #endif /* _LINUX_NFS_H */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 9ea9f9087a71..a9671f930084 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -196,6 +196,15 @@ struct nfs_server { char *fscache_uniq; /* Uniquifier (or NULL) */ #endif + /* The following #defines numerically match the NFSv4 equivalents */ +#define NFS_FH_NOEXPIRE_WITH_OPEN (0x1) +#define NFS_FH_VOLATILE_ANY (0x2) +#define NFS_FH_VOL_MIGRATION (0x4) +#define NFS_FH_VOL_RENAME (0x8) +#define NFS_FH_RENAME_UNSAFE (NFS_FH_VOLATILE_ANY | NFS_FH_VOL_RENAME) + u32 fh_expire_type; /* V4 bitmask representing file + handle volatility type for + this filesystem */ u32 pnfs_blksize; /* layout_blksize attr */ #if IS_ENABLED(CONFIG_NFS_V4) u32 attr_bitmask[3];/* V4 bitmask representing the set @@ -219,9 +228,6 @@ struct nfs_server { u32 acl_bitmask; /* V4 bitmask representing the ACEs that are supported on this filesystem */ - u32 fh_expire_type; /* V4 bitmask representing file - handle volatility type for - this filesystem */ struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */ struct rpc_wait_queue roc_rpcwaitq; void *pnfs_ld_data; /* per mount point data */ diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h index 57ebe1267f7f..0278ce3ad1fb 100644 --- a/include/linux/nvme-tcp.h +++ b/include/linux/nvme-tcp.h @@ -13,6 +13,8 @@ #define NVME_TCP_ADMIN_CCSZ SZ_8K #define NVME_TCP_DIGEST_LENGTH 4 #define NVME_TCP_MIN_MAXH2CDATA 4096 +#define NVME_TCP_MIN_C2HTERM_PLEN 24 +#define NVME_TCP_MAX_C2HTERM_PLEN 152 enum nvme_tcp_pfv { NVME_TCP_PFV_1_0 = 0x0, diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index a48778e1a4ee..fe729dfe509b 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -122,6 +122,7 @@ struct pci_epc_mem { * struct pci_epc - represents the PCI EPC device * @dev: PCI EPC device * @pci_epf: list of endpoint functions present in this EPC device + * list_lock: Mutex for protecting pci_epf list * @ops: function pointers for performing endpoint operations * @windows: array of address space of the endpoint controller * @mem: first window of the endpoint controller, which corresponds to @@ -139,6 +140,7 @@ struct pci_epc_mem { struct pci_epc { struct device dev; struct list_head pci_epf; + struct mutex list_lock; const struct pci_epc_ops *ops; struct pci_epc_mem **windows; struct pci_epc_mem *mem; diff --git a/include/linux/pci.h b/include/linux/pci.h index 7900a8e09c83..b56619265345 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1784,6 +1784,7 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) { return 0; } #endif int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); +void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent); #endif /* Some architectures require additional setup to direct VGA traffic */ @@ -2001,14 +2002,13 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); * These helpers provide future and backwards compatibility * for accessing popular PCI BAR info */ -#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) -#define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) -#define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) -#define pci_resource_len(dev,bar) \ - ((pci_resource_end((dev), (bar)) == 0) ? 
0 : \ - \ - (pci_resource_end((dev), (bar)) - \ - pci_resource_start((dev), (bar)) + 1)) +#define pci_resource_n(dev, bar) (&(dev)->resource[(bar)]) +#define pci_resource_start(dev, bar) (pci_resource_n(dev, bar)->start) +#define pci_resource_end(dev, bar) (pci_resource_n(dev, bar)->end) +#define pci_resource_flags(dev, bar) (pci_resource_n(dev, bar)->flags) +#define pci_resource_len(dev,bar) \ + (pci_resource_end((dev), (bar)) ? \ + resource_size(pci_resource_n((dev), (bar))) : 0) /* * Similar to the helpers above, these manipulate per-pci_dev @@ -2476,6 +2476,12 @@ pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; } #endif +#if defined(CONFIG_X86) && defined(CONFIG_ACPI) +bool arch_pci_dev_is_removable(struct pci_dev *pdev); +#else +static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; } +#endif + #ifdef CONFIG_EEH static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 06e30e816830..7bb0905ad87c 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -1754,6 +1754,10 @@ #define PCI_SUBDEVICE_ID_AT_2700FX 0x2701 #define PCI_SUBDEVICE_ID_AT_2701FX 0x2703 +#define PCI_VENDOR_ID_ASIX 0x125b +#define PCI_DEVICE_ID_ASIX_AX99100 0x9100 +#define PCI_DEVICE_ID_ASIX_AX99100_LB 0x9110 + #define PCI_VENDOR_ID_ESS 0x125d #define PCI_DEVICE_ID_ESS_ESS1968 0x1968 #define PCI_DEVICE_ID_ESS_ESS1978 0x1978 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 27b694552d58..41ff70f315a9 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -935,7 +935,13 @@ struct perf_output_handle { struct perf_buffer *rb; unsigned long wakeup; unsigned long size; - u64 aux_flags; + union { + u64 flags; /* perf_output*() */ + u64 aux_flags; /* perf_aux_output*() */ + struct { + u64 skip_read : 1; + }; + }; union { void *addr; unsigned long head; diff --git a/include/linux/pid.h b/include/linux/pid.h index bf3af54de616..653a527574c4 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -80,6 +80,7 @@ extern struct pid *pidfd_pid(const struct file *file); struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags); struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags); int pidfd_create(struct pid *pid, unsigned int flags); +int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret); static inline struct pid *get_pid(struct pid *pid) { diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index dc1fb5890792..91f87d7e807c 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -103,7 +103,7 @@ int dev_pm_opp_get_supplies(struct dev_pm_opp *opp, struct dev_pm_opp_supply *su unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp); -unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); +unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index); unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp); @@ -121,17 +121,29 @@ unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev); struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available); + +struct dev_pm_opp * +dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq, + u32 index, bool available); + struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq); -struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, - unsigned 
int level); -struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, - unsigned int *level); +struct dev_pm_opp *dev_pm_opp_find_freq_floor_indexed(struct device *dev, + unsigned long *freq, u32 index); struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq); +struct dev_pm_opp *dev_pm_opp_find_freq_ceil_indexed(struct device *dev, + unsigned long *freq, u32 index); + +struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, + unsigned int level); + +struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, + unsigned int *level); + struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, int index); @@ -200,7 +212,7 @@ static inline unsigned long dev_pm_opp_get_power(struct dev_pm_opp *opp) return 0; } -static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) +static inline unsigned long dev_pm_opp_get_freq_indexed(struct dev_pm_opp *opp, u32 index) { return 0; } @@ -247,36 +259,55 @@ static inline unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) return 0; } -static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, - unsigned int level) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, - unsigned int *level) -{ - return ERR_PTR(-EOPNOTSUPP); -} - static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available) { return ERR_PTR(-EOPNOTSUPP); } +static inline struct dev_pm_opp * +dev_pm_opp_find_freq_exact_indexed(struct device *dev, unsigned long freq, + u32 index, bool available) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, unsigned long *freq) { return ERR_PTR(-EOPNOTSUPP); } +static inline struct dev_pm_opp * +dev_pm_opp_find_freq_floor_indexed(struct device *dev, unsigned long *freq, u32 index) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, unsigned long *freq) { return ERR_PTR(-EOPNOTSUPP); } +static inline struct dev_pm_opp * +dev_pm_opp_find_freq_ceil_indexed(struct device *dev, unsigned long *freq, u32 index) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev, + unsigned int level) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev, + unsigned int *level) +{ + return ERR_PTR(-EOPNOTSUPP); +} + static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev, unsigned int *bw, int index) { @@ -631,4 +662,9 @@ static inline void dev_pm_opp_put_prop_name(int token) dev_pm_opp_clear_config(token); } +static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) +{ + return dev_pm_opp_get_freq_indexed(opp, 0); +} + #endif /* __LINUX_OPP_H__ */ diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 9a8151a2bdea..b79a1a20d0d9 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -66,6 +66,7 @@ static inline bool queue_pm_work(struct work_struct *work) extern int pm_generic_runtime_suspend(struct device *dev); extern int pm_generic_runtime_resume(struct device *dev); +extern bool pm_runtime_need_not_resume(struct device *dev); extern int pm_runtime_force_suspend(struct device *dev); extern int pm_runtime_force_resume(struct device *dev); @@ -254,6 +255,7 @@ static inline bool queue_pm_work(struct 
work_struct *work) { return false; } static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } +static inline bool pm_runtime_need_not_resume(struct device *dev) {return true; } static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } static inline int pm_runtime_force_resume(struct device *dev) { return 0; } diff --git a/include/linux/poll.h b/include/linux/poll.h index d1ea4f3714a8..fc641b50f129 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h @@ -41,8 +41,16 @@ typedef struct poll_table_struct { static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) { - if (p && p->_qproc && wait_address) + if (p && p->_qproc && wait_address) { p->_qproc(filp, wait_address, p); + /* + * This memory barrier is paired in the wq_has_sleeper(). + * See the comment above prepare_to_wait(), we need to + * ensure that subsequent tests in this thread can't be + * reordered with __add_wait_queue() in _qproc() paths. + */ + smp_mb(); + } } /* diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h index 78c8ac4951b5..c7abce28ed29 100644 --- a/include/linux/pps_kernel.h +++ b/include/linux/pps_kernel.h @@ -56,8 +56,7 @@ struct pps_device { unsigned int id; /* PPS source unique ID */ void const *lookup_cookie; /* For pps_lookup_dev() only */ - struct cdev cdev; - struct device *dev; + struct device dev; struct fasync_struct *async_queue; /* fasync method */ spinlock_t lock; }; diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 0260f5ea98fe..39532c19aa28 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -20,10 +20,13 @@ enum { * If in doubt, ignore this flag. 
*/ #ifdef MODULE - PROC_ENTRY_PERMANENT = 0U, + PROC_ENTRY_PERMANENT = 0U, #else - PROC_ENTRY_PERMANENT = 1U << 0, + PROC_ENTRY_PERMANENT = 1U << 0, #endif + + PROC_ENTRY_proc_read_iter = 1U << 1, + PROC_ENTRY_proc_compat_ioctl = 1U << 2, }; struct proc_ops { diff --git a/include/linux/property.h b/include/linux/property.h index 587b5b666b5b..fb685f0f702a 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -11,6 +11,7 @@ #define _LINUX_PROPERTY_H_ #include +#include #include #include @@ -114,25 +115,32 @@ struct fwnode_handle *fwnode_get_next_available_child_node( for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\ child = fwnode_get_next_available_child_node(fwnode, child)) -struct fwnode_handle *device_get_next_child_node( - struct device *dev, struct fwnode_handle *child); +struct fwnode_handle *device_get_next_child_node(const struct device *dev, + struct fwnode_handle *child); #define device_for_each_child_node(dev, child) \ for (child = device_get_next_child_node(dev, NULL); child; \ child = device_get_next_child_node(dev, child)) -struct fwnode_handle *fwnode_get_named_child_node( - const struct fwnode_handle *fwnode, const char *childname); -struct fwnode_handle *device_get_named_child_node(struct device *dev, +#define device_for_each_child_node_scoped(dev, child) \ + for (struct fwnode_handle *child __free(fwnode_handle) = \ + device_get_next_child_node(dev, NULL); \ + child; child = device_get_next_child_node(dev, child)) + +struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode, + const char *childname); +struct fwnode_handle *device_get_named_child_node(const struct device *dev, const char *childname); struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); void fwnode_handle_put(struct fwnode_handle *fwnode); +DEFINE_FREE(fwnode_handle, struct fwnode_handle *, fwnode_handle_put(_T)) + int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name); -unsigned int device_get_child_node_count(struct device *dev); +unsigned int device_get_child_node_count(const struct device *dev); static inline bool device_property_read_bool(struct device *dev, const char *propname) diff --git a/include/linux/ptp_kvm.h b/include/linux/ptp_kvm.h index c2e28deef33a..746fd67c3480 100644 --- a/include/linux/ptp_kvm.h +++ b/include/linux/ptp_kvm.h @@ -14,6 +14,7 @@ struct timespec64; struct clocksource; int kvm_arch_ptp_init(void); +void kvm_arch_ptp_exit(void); int kvm_arch_ptp_get_clock(struct timespec64 *ts); int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec, struct clocksource **cs); diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index c952c5ba8fab..135a3d9140a0 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h @@ -402,6 +402,10 @@ static inline void user_single_step_report(struct pt_regs *regs) #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) #endif +#ifndef exception_ip +#define exception_ip(x) instruction_pointer(x) +#endif + extern int task_current_syscall(struct task_struct *target, struct syscall_info *info); extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h index 3d1a9e716b80..6a0999c26c7c 100644 --- a/include/linux/rbtree_latch.h +++ b/include/linux/rbtree_latch.h @@ -206,7 +206,7 @@ latch_tree_find(void *key, struct latch_tree_root 
*root, do { seq = raw_read_seqcount_latch(&root->seq); node = __lt_find(key, root, seq & 1, ops->comp); - } while (read_seqcount_latch_retry(&root->seq, seq)); + } while (raw_read_seqcount_latch_retry(&root->seq, seq)); return node; } diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6858cae98da9..d1c35009831b 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -97,9 +97,9 @@ static inline void __rcu_read_lock(void) static inline void __rcu_read_unlock(void) { - preempt_enable(); if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) rcu_read_unlock_strict(); + preempt_enable(); } static inline int rcu_preempt_depth(void) @@ -133,7 +133,7 @@ static inline void rcu_sysrq_end(void) { } #if defined(CONFIG_NO_HZ_FULL) && (!defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)) void rcu_irq_work_resched(void); #else -static inline void rcu_irq_work_resched(void) { } +static __always_inline void rcu_irq_work_resched(void) { } #endif #ifdef CONFIG_RCU_NOCB_CPU @@ -1031,6 +1031,9 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) #define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \ kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__) +#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr) +#define kfree_rcu_mightsleep(ptr) kvfree_rcu_mightsleep(ptr) + #define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME #define kvfree_rcu_arg_2(ptr, rhf) \ do { \ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 5efb51486e8a..54483d5e6f91 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -105,7 +105,7 @@ extern int rcu_scheduler_active; void rcu_end_inkernel_boot(void); bool rcu_inkernel_boot_has_ended(void); bool rcu_is_watching(void); -#ifndef CONFIG_PREEMPTION +#ifndef CONFIG_PREEMPT_RCU void rcu_all_qs(void); #endif diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index ae2c6a3cec5d..f532d1eda761 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -138,4 +138,26 @@ extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, extern void rtnl_offload_xstats_notify(struct net_device *dev); +static inline int rtnl_has_listeners(const struct net *net, u32 group) +{ + struct sock *rtnl = net->rtnl; + + return netlink_has_listeners(rtnl, group); +} + +/** + * rtnl_notify_needed - check if notification is needed + * @net: Pointer to the net namespace + * @nlflags: netlink ingress message flags + * @group: rtnl group + * + * Based on the ingress message flags and rtnl group, returns true + * if a notification is needed, false otherwise. 
+ */ +static inline bool +rtnl_notify_needed(const struct net *net, u16 nlflags, u32 group) +{ + return (nlflags & NLM_F_ECHO) || rtnl_has_listeners(net, group); +} + #endif /* __LINUX_RTNETLINK_H */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 375a5e90d86a..02cdcd3c31fb 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -288,7 +288,7 @@ static inline void sg_dma_mark_bus_address(struct scatterlist *sg) } /** - * sg_unmark_bus_address - Unmark the scatterlist entry as a bus address + * sg_dma_unmark_bus_address - Unmark the scatterlist entry as a bus address * @sg: SG entry * * Description: diff --git a/include/linux/sched.h b/include/linux/sched.h index 0cac69902ec5..4dc764f3d26f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -888,9 +888,7 @@ struct task_struct { unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; unsigned sched_migrated:1; -#ifdef CONFIG_PSI - unsigned sched_psi_wake_requeue:1; -#endif + unsigned sched_task_hot:1; /* Force alignment to the next boundary: */ unsigned :0; @@ -1663,15 +1661,16 @@ static inline unsigned int __task_state_index(unsigned int tsk_state, BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); - if (tsk_state == TASK_IDLE) + if ((tsk_state & TASK_IDLE) == TASK_IDLE) state = TASK_REPORT_IDLE; /* * We're lying here, but rather than expose a completely new task state * to userspace, we can make this appear as if the task has gone through * a regular rt_mutex_lock() call. + * Report frozen tasks as uninterruptible. */ - if (tsk_state == TASK_RTLOCK_WAIT) + if ((tsk_state & TASK_RTLOCK_WAIT) || (tsk_state & TASK_FROZEN)) state = TASK_UNINTERRUPTIBLE; return fls(state); diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h index 59d3736c454c..737b50f40137 100644 --- a/include/linux/sched/smt.h +++ b/include/linux/sched/smt.h @@ -12,7 +12,7 @@ static __always_inline bool sched_smt_active(void) return static_branch_likely(&sched_smt_present); } #else -static inline bool sched_smt_active(void) { return false; } +static __always_inline bool sched_smt_active(void) { return false; } #endif void arch_smt_update(void); diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index aaa25ed1a8fe..00135a8d5773 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -38,6 +38,7 @@ struct kernel_clone_args { void *fn_arg; struct cgroup *cgrp; struct css_set *cset; + unsigned int kill_seq; }; /* diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index f158b025c175..d2117e1c8fa5 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -8,6 +8,7 @@ #include #include +#include #ifdef CONFIG_THREAD_INFO_IN_TASK @@ -88,6 +89,7 @@ static inline int object_is_on_stack(const void *obj) { void *stack = task_stack_page(current); + obj = kasan_reset_tag(obj); return (obj >= stack) && (obj < (stack + THREAD_SIZE)); } diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index d31d76be4982..91ff537c6246 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -69,10 +69,10 @@ struct seccomp_data; #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER static inline int secure_computing(void) { return 0; } -static inline int __secure_computing(const struct seccomp_data *sd) { return 0; } #else static inline void secure_computing_strict(int this_syscall) { return; } #endif +static inline int __secure_computing(const struct seccomp_data *sd) { return 0; } static inline long 
prctl_get_seccomp(void) { diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index d778af83c8f3..b4b4ce9a4151 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -671,9 +671,9 @@ typedef struct { * * Return: sequence counter raw value. Use the lowest bit as an index for * picking which data copy to read. The full counter must then be checked - * with read_seqcount_latch_retry(). + * with raw_read_seqcount_latch_retry(). */ -static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s) +static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s) { /* * Pairs with the first smp_wmb() in raw_write_seqcount_latch(). @@ -683,21 +683,64 @@ static inline unsigned raw_read_seqcount_latch(const seqcount_latch_t *s) } /** - * read_seqcount_latch_retry() - end a seqcount_latch_t read section + * read_seqcount_latch() - pick even/odd latch data copy + * @s: Pointer to seqcount_latch_t + * + * See write_seqcount_latch() for details and a full reader/writer usage + * example. + * + * Return: sequence counter raw value. Use the lowest bit as an index for + * picking which data copy to read. The full counter must then be checked + * with read_seqcount_latch_retry(). + */ +static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s) +{ + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); + return raw_read_seqcount_latch(s); +} + +/** + * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section * @s: Pointer to seqcount_latch_t * @start: count, from raw_read_seqcount_latch() * * Return: true if a read section retry is required, else false */ -static inline int +static __always_inline int +raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) +{ + smp_rmb(); + return unlikely(READ_ONCE(s->seqcount.sequence) != start); +} + +/** + * read_seqcount_latch_retry() - end a seqcount_latch_t read section + * @s: Pointer to seqcount_latch_t + * @start: count, from read_seqcount_latch() + * + * Return: true if a read section retry is required, else false + */ +static __always_inline int read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) { - return read_seqcount_retry(&s->seqcount, start); + kcsan_atomic_next(0); + return raw_read_seqcount_latch_retry(s, start); } /** * raw_write_seqcount_latch() - redirect latch readers to even/odd copy * @s: Pointer to seqcount_latch_t + */ +static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s) +{ + smp_wmb(); /* prior stores before incrementing "sequence" */ + s->seqcount.sequence++; + smp_wmb(); /* increment "sequence" before following stores */ +} + +/** + * write_seqcount_latch_begin() - redirect latch readers to odd copy + * @s: Pointer to seqcount_latch_t * * The latch technique is a multiversion concurrency control method that allows * queries during non-atomic modifications. If you can guarantee queries never @@ -725,17 +768,11 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) * * void latch_modify(struct latch_struct *latch, ...) 
* { - * smp_wmb(); // Ensure that the last data[1] update is visible - * latch->seq.sequence++; - * smp_wmb(); // Ensure that the seqcount update is visible - * + * write_seqcount_latch_begin(&latch->seq); * modify(latch->data[0], ...); - * - * smp_wmb(); // Ensure that the data[0] update is visible - * latch->seq.sequence++; - * smp_wmb(); // Ensure that the seqcount update is visible - * + * write_seqcount_latch(&latch->seq); * modify(latch->data[1], ...); + * write_seqcount_latch_end(&latch->seq); * } * * The query will have a form like:: @@ -746,7 +783,7 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) * unsigned seq, idx; * * do { - * seq = raw_read_seqcount_latch(&latch->seq); + * seq = read_seqcount_latch(&latch->seq); * * idx = seq & 0x01; * entry = data_query(latch->data[idx], ...); @@ -776,11 +813,31 @@ read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) * When data is a dynamic data structure; one should use regular RCU * patterns to manage the lifetimes of the objects within. */ -static inline void raw_write_seqcount_latch(seqcount_latch_t *s) +static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s) { - smp_wmb(); /* prior stores before incrementing "sequence" */ - s->seqcount.sequence++; - smp_wmb(); /* increment "sequence" before following stores */ + kcsan_nestable_atomic_begin(); + raw_write_seqcount_latch(s); +} + +/** + * write_seqcount_latch() - redirect latch readers to even copy + * @s: Pointer to seqcount_latch_t + */ +static __always_inline void write_seqcount_latch(seqcount_latch_t *s) +{ + raw_write_seqcount_latch(s); +} + +/** + * write_seqcount_latch_end() - end a seqcount_latch_t write section + * @s: Pointer to seqcount_latch_t + * + * Marks the end of a seqcount_latch_t writer section, after all copies of the + * latch-protected data have been updated. + */ +static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s) +{ + kcsan_nestable_atomic_end(); } /* @@ -833,11 +890,7 @@ typedef struct { */ static inline unsigned read_seqbegin(const seqlock_t *sl) { - unsigned ret = read_seqcount_begin(&sl->seqcount); - - kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */ - kcsan_flat_atomic_begin(); - return ret; + return read_seqcount_begin(&sl->seqcount); } /** @@ -853,12 +906,6 @@ static inline unsigned read_seqbegin(const seqlock_t *sl) */ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { - /* - * Assume not nested: read_seqretry() may be called multiple times when - * completing read critical section. 
- */ - kcsan_flat_atomic_end(); - return read_seqcount_retry(&sl->seqcount, start); } diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 062fe440f5d0..32bbebf5b71e 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -87,6 +87,8 @@ struct sk_psock { struct sk_psock_progs progs; #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) struct strparser strp; + u32 copied_seq; + u32 ingress_bytes; #endif struct sk_buff_head ingress_skb; struct list_head ingress_msg; @@ -308,17 +310,22 @@ static inline void sock_drop(struct sock *sk, struct sk_buff *skb) kfree_skb(skb); } -static inline void sk_psock_queue_msg(struct sk_psock *psock, +static inline bool sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { + bool ret; + spin_lock_bh(&psock->ingress_lock); - if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { list_add_tail(&msg->list, &psock->ingress_msg); - else { + ret = true; + } else { sk_msg_free(psock->sk, msg); kfree(msg); + ret = false; } spin_unlock_bh(&psock->ingress_lock); + return ret; } static inline struct sk_msg *sk_psock_dequeue_msg(struct sk_psock *psock) diff --git a/include/linux/slab.h b/include/linux/slab.h index d9606f5453c7..330a8f1f6a9b 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -12,6 +12,7 @@ #ifndef _LINUX_SLAB_H #define _LINUX_SLAB_H +#include #include #include #include @@ -225,12 +226,17 @@ static inline bool kmem_dump_obj(void *object) { return false; } * alignment larger than the alignment of a 64-bit integer. * Setting ARCH_DMA_MINALIGN in arch headers allows that. */ -#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8 && !IS_ENABLED(CONFIG_ROCKCHIP_KMALLOC_NO_USE_ARCH_DMA_MINALIGN) +#ifdef ARCH_HAS_DMA_MINALIGN +#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN) && !IS_ENABLED(CONFIG_ROCKCHIP_KMALLOC_NO_USE_ARCH_DMA_MINALIGN) #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN -#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN -#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN) -#else +#endif +#endif + +#ifndef ARCH_KMALLOC_MINALIGN #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) +#elif ARCH_KMALLOC_MINALIGN > 8 +#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN +#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE) #endif /* diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h index 0b9ecd8cf979..110978dc9af1 100644 --- a/include/linux/sock_diag.h +++ b/include/linux/sock_diag.h @@ -13,6 +13,7 @@ struct nlmsghdr; struct sock; struct sock_diag_handler { + struct module *owner; __u8 family; int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); int (*get_info)(struct sk_buff *skb, struct sock *sk); @@ -22,8 +23,13 @@ struct sock_diag_handler { int sock_diag_register(const struct sock_diag_handler *h); void sock_diag_unregister(const struct sock_diag_handler *h); -void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); -void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); +struct sock_diag_inet_compat { + struct module *owner; + int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh); +}; + +void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr); +void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr); u64 __sock_gen_cookie(struct sock *sk); diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index bae5e2369b4f..0eb3a2b1f81f 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -50,11 +50,38 @@ 
static inline int copy_from_sockptr_offset(void *dst, sockptr_t src, return 0; } +/* Deprecated. + * This is unsafe, unless caller checked user provided optlen. + * Prefer copy_safe_from_sockptr() instead. + */ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) { return copy_from_sockptr_offset(dst, src, 0, size); } +/** + * copy_safe_from_sockptr: copy a struct from sockptr + * @dst: Destination address, in kernel space. This buffer must be @ksize + * bytes long. + * @ksize: Size of @dst struct. + * @optval: Source address. (in user or kernel space) + * @optlen: Size of @optval data. + * + * Returns: + * * -EINVAL: @optlen < @ksize + * * -EFAULT: access to userspace failed. + * * 0 : @ksize bytes were copied + */ +static inline int copy_safe_from_sockptr(void *dst, size_t ksize, + sockptr_t optval, unsigned int optlen) +{ + if (optlen < ksize) + return -EINVAL; + if (copy_from_sockptr(dst, optval, ksize)) + return -EFAULT; + return 0; +} + static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset, const void *src, size_t size) { diff --git a/include/linux/static_call.h b/include/linux/static_call.h index df53bed9d71f..d73384f50603 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -160,6 +160,8 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool #ifdef CONFIG_HAVE_STATIC_CALL_INLINE +extern bool static_call_initialized; + extern int __init static_call_init(void); struct static_call_mod { @@ -223,6 +225,8 @@ extern long __static_call_return0(void); #elif defined(CONFIG_HAVE_STATIC_CALL) +#define static_call_initialized 0 + static inline int static_call_init(void) { return 0; } #define DEFINE_STATIC_CALL(name, _func) \ @@ -279,6 +283,8 @@ extern long __static_call_return0(void); #else /* Generic implementation */ +#define static_call_initialized 0 + static inline int static_call_init(void) { return 0; } static inline long __static_call_return0(void) diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 8f9bee0e21c3..a220b28904ca 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -140,13 +140,14 @@ struct rpc_task_setup { #define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) #define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE) -#define RPC_TASK_RUNNING 0 -#define RPC_TASK_QUEUED 1 -#define RPC_TASK_ACTIVE 2 -#define RPC_TASK_NEED_XMIT 3 -#define RPC_TASK_NEED_RECV 4 -#define RPC_TASK_MSG_PIN_WAIT 5 -#define RPC_TASK_SIGNALLED 6 +enum { + RPC_TASK_RUNNING, + RPC_TASK_QUEUED, + RPC_TASK_ACTIVE, + RPC_TASK_NEED_XMIT, + RPC_TASK_NEED_RECV, + RPC_TASK_MSG_PIN_WAIT, +}; #define rpc_test_and_set_running(t) \ test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) @@ -158,7 +159,7 @@ struct rpc_task_setup { #define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate) -#define RPC_SIGNALLED(t) test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate) +#define RPC_SIGNALLED(t) (READ_ONCE(task->tk_rpc_status) == -ERESTARTSYS) /* * Task priorities. 
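The sockptr.h hunk above marks bare copy_from_sockptr() as deprecated for user-controlled lengths and adds copy_safe_from_sockptr(), which validates optlen against the kernel structure size before copying. A minimal usage sketch follows, assuming a hypothetical setsockopt handler; example_opts and example_setsockopt are illustrative names and are not part of the patch:

/*
 * Hedged sketch only: shows the intended call pattern for the new
 * copy_safe_from_sockptr() helper. Names below are hypothetical.
 */
struct example_opts {
	u32 mode;
	u32 timeout_ms;
};

static int example_setsockopt(struct sock *sk, sockptr_t optval,
			      unsigned int optlen)
{
	struct example_opts opts;
	int err;

	/*
	 * Returns -EINVAL if optlen < sizeof(opts) and copies exactly
	 * sizeof(opts) bytes on success, so a short user buffer can no
	 * longer lead to an under-sized copy the way an unchecked
	 * copy_from_sockptr() call could.
	 */
	err = copy_safe_from_sockptr(&opts, sizeof(opts), optval, optlen);
	if (err)
		return err;

	/* ... apply opts.mode / opts.timeout_ms to the socket ... */
	return 0;
}

With the helper, the optlen check, the exact-size copy and the -EINVAL/-EFAULT handling live in one place rather than being open-coded at every setsockopt call site.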
diff --git a/include/linux/tpm.h b/include/linux/tpm.h index df5cd4245f29..4a4112bb1d1b 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -181,7 +181,7 @@ enum tpm2_const { enum tpm2_timeouts { TPM2_TIMEOUT_A = 750, - TPM2_TIMEOUT_B = 2000, + TPM2_TIMEOUT_B = 4000, TPM2_TIMEOUT_C = 200, TPM2_TIMEOUT_D = 30, TPM2_DURATION_SHORT = 20, @@ -271,6 +271,7 @@ enum tpm2_cc_attrs { #define TPM_VID_WINBOND 0x1050 #define TPM_VID_STM 0x104A #define TPM_VID_ATML 0x1114 +#define TPM_VID_IFX 0x15D1 enum tpm_chip_flags { TPM_CHIP_FLAG_BOOTSTRAPPED = BIT(0), diff --git a/include/linux/trace.h b/include/linux/trace.h index 2a70a447184c..bb4d84f1c58c 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h @@ -72,8 +72,8 @@ static inline int unregister_ftrace_export(struct trace_export *export) static inline void trace_printk_init_buffers(void) { } -static inline int trace_array_printk(struct trace_array *tr, unsigned long ip, - const char *fmt, ...) +static inline __printf(3, 4) +int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { return 0; } diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index f70624ec4188..f042574d1fb6 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -355,7 +355,7 @@ struct trace_event_call { struct list_head list; struct trace_event_class *class; union { - char *name; + const char *name; /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */ struct tracepoint *tp; }; diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 5a2c650d9e1c..c230cbd25aee 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -77,8 +77,8 @@ extern __printf(2, 3) void trace_seq_printf(struct trace_seq *s, const char *fmt, ...); extern __printf(2, 0) void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); -extern void -trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); +extern __printf(2, 0) +void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt); @@ -100,8 +100,8 @@ extern int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str, static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) 
{ } -static inline void -trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) +static inline __printf(2, 0) +void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) { } diff --git a/include/linux/types.h b/include/linux/types.h index ea8cf60a8a79..f1df5d8df2ef 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -109,8 +109,9 @@ typedef u64 u_int64_t; typedef s64 int64_t; #endif -/* this is a special 64bit data type that is 8-byte aligned */ +/* These are the special 64-bit data types that are 8-byte aligned */ #define aligned_u64 __aligned_u64 +#define aligned_s64 __aligned_s64 #define aligned_be64 __aligned_be64 #define aligned_le64 __aligned_le64 diff --git a/include/linux/usb.h b/include/linux/usb.h index 3ce7b052a19f..1326dded358f 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -697,13 +697,12 @@ struct usb_device { unsigned long active_duration; -#ifdef CONFIG_PM unsigned long connect_time; unsigned do_remote_wakeup:1; unsigned reset_resume:1; unsigned port_is_suspended:1; -#endif + struct wusb_dev *wusb_dev; int slot_id; struct usb2_lpm_parameters l1_params; diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index cd667acf6267..575716d3672a 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -486,9 +486,7 @@ extern void usb_hcd_pci_shutdown(struct pci_dev *dev); extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev); -#ifdef CONFIG_PM extern const struct dev_pm_ops usb_hcd_pci_pm_ops; -#endif #endif /* CONFIG_USB_PCI */ /* pci-ish (pdev null is ok) buffer alloc/mapping support */ diff --git a/include/linux/usb/r8152.h b/include/linux/usb/r8152.h index 33a4c146dc19..2ca60828f28b 100644 --- a/include/linux/usb/r8152.h +++ b/include/linux/usb/r8152.h @@ -30,6 +30,7 @@ #define VENDOR_ID_NVIDIA 0x0955 #define VENDOR_ID_TPLINK 0x2357 #define VENDOR_ID_DLINK 0x2001 +#define VENDOR_ID_DELL 0x413c #define VENDOR_ID_ASUS 0x0b05 #if IS_REACHABLE(CONFIG_USB_RTL8152) diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index bffc8d3e14ad..0af213187e9f 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -145,7 +145,8 @@ struct tcpc_dev { void (*frs_sourcing_vbus)(struct tcpc_dev *dev); int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable); int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode, - bool pps_active, u32 requested_vbus_voltage); + bool pps_active, u32 requested_vbus_voltage, + u32 pps_apdo_min_voltage); bool (*is_vbus_vsafe0v)(struct tcpc_dev *dev); void (*set_partner_usb_comm_capable)(struct tcpc_dev *dev, bool enable); }; diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h index 43db6e47503c..11fae273b3b2 100644 --- a/include/linux/util_macros.h +++ b/include/linux/util_macros.h @@ -2,19 +2,6 @@ #ifndef _LINUX_HELPER_MACROS_H_ #define _LINUX_HELPER_MACROS_H_ -#define __find_closest(x, a, as, op) \ -({ \ - typeof(as) __fc_i, __fc_as = (as) - 1; \ - typeof(x) __fc_x = (x); \ - typeof(*a) const *__fc_a = (a); \ - for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ - if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \ - __fc_a[__fc_i + 1], 2)) \ - break; \ - } \ - (__fc_i); \ -}) - /** * find_closest - locate the closest element in a sorted array * @x: The reference value. @@ -23,8 +10,27 @@ * @as: Size of 'a'. * * Returns the index of the element closest to 'x'. 
+ * Note: If using an array of negative numbers (or mixed positive numbers), + * then be sure that 'x' is of a signed-type to get good results. */ -#define find_closest(x, a, as) __find_closest(x, a, as, <=) +#define find_closest(x, a, as) \ +({ \ + typeof(as) __fc_i, __fc_as = (as) - 1; \ + long __fc_mid_x, __fc_x = (x); \ + long __fc_left, __fc_right; \ + typeof(*a) const *__fc_a = (a); \ + for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ + __fc_mid_x = (__fc_a[__fc_i] + __fc_a[__fc_i + 1]) / 2; \ + if (__fc_x <= __fc_mid_x) { \ + __fc_left = __fc_x - __fc_a[__fc_i]; \ + __fc_right = __fc_a[__fc_i + 1] - __fc_x; \ + if (__fc_right < __fc_left) \ + __fc_i++; \ + break; \ + } \ + } \ + (__fc_i); \ +}) /** * find_closest_descending - locate the closest element in a sorted array @@ -34,9 +40,27 @@ * @as: Size of 'a'. * * Similar to find_closest() but 'a' is expected to be sorted in descending - * order. + * order. The iteration is done in reverse order, so that the comparison + * of '__fc_right' & '__fc_left' also works for unsigned numbers. */ -#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=) +#define find_closest_descending(x, a, as) \ +({ \ + typeof(as) __fc_i, __fc_as = (as) - 1; \ + long __fc_mid_x, __fc_x = (x); \ + long __fc_left, __fc_right; \ + typeof(*a) const *__fc_a = (a); \ + for (__fc_i = __fc_as; __fc_i >= 1; __fc_i--) { \ + __fc_mid_x = (__fc_a[__fc_i] + __fc_a[__fc_i - 1]) / 2; \ + if (__fc_x <= __fc_mid_x) { \ + __fc_left = __fc_x - __fc_a[__fc_i]; \ + __fc_right = __fc_a[__fc_i - 1] - __fc_x; \ + if (__fc_right < __fc_left) \ + __fc_i--; \ + break; \ + } \ + } \ + (__fc_i); \ +}) /** * is_insidevar - check if the @ptr points inside the @var memory range. diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 19cf5b6892ce..4fb5fa0cc84e 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -513,7 +513,7 @@ static inline const char *node_stat_name(enum node_stat_item item) static inline const char *lru_list_name(enum lru_list lru) { - return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_" + return node_stat_name(NR_LRU_BASE + (enum node_stat_item)lru) + 3; // skip "nr_" } static inline const char *writeback_stat_name(enum writeback_stat_item item) diff --git a/include/linux/wait.h b/include/linux/wait.h index a0307b516b09..bc06a315653a 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -224,6 +224,7 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head); #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL) #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1) #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0) +#define wake_up_sync(x) __wake_up_sync(x, TASK_NORMAL) #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h index 8fa963326bf6..c64096b5c782 100644 --- a/include/media/v4l2-dv-timings.h +++ b/include/media/v4l2-dv-timings.h @@ -146,15 +146,18 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix, * @polarities: the horizontal and vertical polarities (same as struct * v4l2_bt_timings polarities). * @interlaced: if this flag is true, it indicates interlaced format + * @cap: the v4l2_dv_timings_cap capabilities. * @fmt: the resulting timings. * * This function will attempt to detect if the given values correspond to a * valid CVT format. 
If so, then it will return true, and fmt will be filled * in with the found CVT timings. */ -bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync, - unsigned active_width, u32 polarities, bool interlaced, - struct v4l2_dv_timings *fmt); +bool v4l2_detect_cvt(unsigned int frame_height, unsigned int hfreq, + unsigned int vsync, unsigned int active_width, + u32 polarities, bool interlaced, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *fmt); /** * v4l2_detect_gtf - detect if the given timings follow the GTF standard @@ -170,15 +173,18 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync, * image height, so it has to be passed explicitly. Usually * the native screen aspect ratio is used for this. If it * is not filled in correctly, then 16:9 will be assumed. + * @cap: the v4l2_dv_timings_cap capabilities. * @fmt: the resulting timings. * * This function will attempt to detect if the given values correspond to a * valid GTF format. If so, then it will return true, and fmt will be filled * in with the found GTF timings. */ -bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync, - u32 polarities, bool interlaced, struct v4l2_fract aspect, - struct v4l2_dv_timings *fmt); +bool v4l2_detect_gtf(unsigned int frame_height, unsigned int hfreq, + unsigned int vsync, u32 polarities, bool interlaced, + struct v4l2_fract aspect, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *fmt); /** * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes diff --git a/include/memory/renesas-rpc-if.h b/include/memory/renesas-rpc-if.h index ddf94356752d..b1b6d9126b03 100644 --- a/include/memory/renesas-rpc-if.h +++ b/include/memory/renesas-rpc-if.h @@ -69,20 +69,10 @@ struct rpcif { }; int rpcif_sw_init(struct rpcif *rpc, struct device *dev); -int rpcif_hw_init(struct rpcif *rpc, bool hyperflash); -void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs, +int rpcif_hw_init(struct device *dev, bool hyperflash); +void rpcif_prepare(struct device *dev, const struct rpcif_op *op, u64 *offs, size_t *len); -int rpcif_manual_xfer(struct rpcif *rpc); -ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf); - -static inline void rpcif_enable_rpm(struct rpcif *rpc) -{ - pm_runtime_enable(rpc->dev); -} - -static inline void rpcif_disable_rpm(struct rpcif *rpc) -{ - pm_runtime_disable(rpc->dev); -} +int rpcif_manual_xfer(struct device *dev); +ssize_t rpcif_dirmap_read(struct device *dev, u64 offs, size_t len, void *buf); #endif // __RENESAS_RPC_IF_H diff --git a/include/net/af_unix.h b/include/net/af_unix.h index e7d71a516bd4..b1f82d74339e 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -8,20 +8,46 @@ #include #include -void unix_inflight(struct user_struct *user, struct file *fp); -void unix_notinflight(struct user_struct *user, struct file *fp); -void unix_destruct_scm(struct sk_buff *skb); +#if IS_ENABLED(CONFIG_UNIX) +struct unix_sock *unix_get_socket(struct file *filp); +#else +static inline struct unix_sock *unix_get_socket(struct file *filp) +{ + return NULL; +} +#endif + +extern unsigned int unix_tot_inflight; +void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver); +void unix_del_edges(struct scm_fp_list *fpl); +void unix_update_edges(struct unix_sock *receiver); +int unix_prepare_fpl(struct scm_fp_list *fpl); +void unix_destroy_fpl(struct scm_fp_list *fpl); void unix_gc(void); -void wait_for_unix_gc(void); -struct sock 
*unix_get_socket(struct file *filp); +void wait_for_unix_gc(struct scm_fp_list *fpl); + +struct unix_vertex { + struct list_head edges; + struct list_head entry; + struct list_head scc_entry; + unsigned long out_degree; + unsigned long index; + unsigned long scc_index; +}; + +struct unix_edge { + struct unix_sock *predecessor; + struct unix_sock *successor; + struct list_head vertex_entry; + struct list_head stack_entry; +}; + struct sock *unix_peer_get(struct sock *sk); #define UNIX_HASH_MOD (256 - 1) #define UNIX_HASH_SIZE (256 * 2) #define UNIX_HASH_BITS 8 -extern unsigned int unix_tot_inflight; - struct unix_address { refcount_t refcnt; int len; @@ -41,6 +67,7 @@ struct unix_skb_parms { struct scm_stat { atomic_t nr_fds; + unsigned long nr_unix_fds; }; #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) @@ -53,12 +80,9 @@ struct unix_sock { struct path path; struct mutex iolock, bindlock; struct sock *peer; - struct list_head link; - unsigned long inflight; + struct sock *listener; + struct unix_vertex *vertex; spinlock_t lock; - unsigned long gc_flags; -#define UNIX_GC_CANDIDATE 0 -#define UNIX_GC_MAYBE_CYCLE 1 struct socket_wq peer_wq; wait_queue_entry_t peer_wake; struct scm_stat scm_stat; diff --git a/include/net/ax25.h b/include/net/ax25.h index 1d55e8ee08b4..e9465aa07a4e 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -229,6 +229,7 @@ typedef struct ax25_dev { #endif refcount_t refcount; bool device_up; + struct rcu_head rcu; } ax25_dev; typedef struct ax25_cb { @@ -291,9 +292,8 @@ static inline void ax25_dev_hold(ax25_dev *ax25_dev) static inline void ax25_dev_put(ax25_dev *ax25_dev) { - if (refcount_dec_and_test(&ax25_dev->refcount)) { - kfree(ax25_dev); - } + if (refcount_dec_and_test(&ax25_dev->refcount)) + kfree_rcu(ax25_dev, rcu); } static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev) { @@ -336,9 +336,9 @@ void ax25_digi_invert(const ax25_digi *, ax25_digi *); extern spinlock_t ax25_dev_lock; #if IS_ENABLED(CONFIG_AX25) -static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev) +static inline ax25_dev *ax25_dev_ax25dev(const struct net_device *dev) { - return dev->ax25_ptr; + return rcu_dereference_rtnl(dev->ax25_ptr); } #endif diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 41fc7f12971a..5689b4744764 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -122,6 +122,7 @@ struct bt_voice { #define BT_VOICE_TRANSPARENT 0x0003 #define BT_VOICE_CVSD_16BIT 0x0060 +#define BT_VOICE_TRANSPARENT_16BIT 0x0063 #define BT_SNDMTU 12 #define BT_RCVMTU 13 diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h index 7accd5ff0760..3a7658d66022 100644 --- a/include/net/bluetooth/hci_sync.h +++ b/include/net/bluetooth/hci_sync.h @@ -47,6 +47,18 @@ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy); int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy); +struct hci_cmd_sync_work_entry * +hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy); +int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy); +void hci_cmd_sync_cancel_entry(struct hci_dev *hdev, + struct hci_cmd_sync_work_entry *entry); +bool hci_cmd_sync_dequeue(struct hci_dev *hdev, 
hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy); +bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev, + hci_cmd_sync_work_func_t func, void *data, + hci_cmd_sync_work_destroy_t destroy); int hci_update_eir_sync(struct hci_dev *hdev); int hci_update_class_sync(struct hci_dev *hdev); diff --git a/include/net/bond_options.h b/include/net/bond_options.h index 69292ecc0325..f631d9f09941 100644 --- a/include/net/bond_options.h +++ b/include/net/bond_options.h @@ -160,5 +160,7 @@ void bond_option_arp_ip_targets_clear(struct bonding *bond); #if IS_ENABLED(CONFIG_IPV6) void bond_option_ns_ip6_targets_clear(struct bonding *bond); #endif +void bond_slave_ns_maddrs_add(struct bonding *bond, struct slave *slave); +void bond_slave_ns_maddrs_del(struct bonding *bond, struct slave *slave); #endif /* _NET_BOND_OPTIONS_H */ diff --git a/include/net/dsa.h b/include/net/dsa.h index f96b61d9768e..e26bace77479 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -888,9 +888,15 @@ struct dsa_switch_ops { phy_interface_t iface); int (*phylink_mac_link_state)(struct dsa_switch *ds, int port, struct phylink_link_state *state); + int (*phylink_mac_prepare)(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface); void (*phylink_mac_config)(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state); + int (*phylink_mac_finish)(struct dsa_switch *ds, int port, + unsigned int mode, + phy_interface_t interface); void (*phylink_mac_an_restart)(struct dsa_switch *ds, int port); void (*phylink_mac_link_down)(struct dsa_switch *ds, int port, unsigned int mode, diff --git a/include/net/dst.h b/include/net/dst.h index d67fda89cd0f..3a1a6f94a809 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -434,6 +434,15 @@ static inline void dst_set_expires(struct dst_entry *dst, int timeout) dst->expires = expires; } +static inline unsigned int dst_dev_overhead(struct dst_entry *dst, + struct sk_buff *skb) +{ + if (likely(dst)) + return LL_RESERVED_SPACE(dst->dev); + + return skb->mac_len; +} + INDIRECT_CALLABLE_DECLARE(int ip6_output(struct net *, struct sock *, struct sk_buff *)); INDIRECT_CALLABLE_DECLARE(int ip_output(struct net *, struct sock *, diff --git a/include/net/flow.h b/include/net/flow.h index 079cc493fe67..5a17fa6e016f 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -115,11 +115,10 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif, } /* Reset some input parameters after previous lookup */ -static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos, +static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __be32 daddr, __be32 saddr) { fl4->flowi4_oif = oif; - fl4->flowi4_tos = tos; fl4->daddr = daddr; fl4->saddr = saddr; } diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 4242f863f560..7649d4901f0c 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -281,7 +281,7 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk) static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) { - return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog; + return inet_csk_reqsk_queue_len(sk) > READ_ONCE(sk->sk_max_ack_backlog); } bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req); diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 74ff688568a0..f475757daafb 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h @@ 
-96,30 +96,28 @@ static inline struct in6_addr *inetpeer_get_addr_v6(struct inetpeer_addr *iaddr) /* can be called with or without local BH being disabled */ struct inet_peer *inet_getpeer(struct inet_peer_base *base, - const struct inetpeer_addr *daddr, - int create); + const struct inetpeer_addr *daddr); static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base, __be32 v4daddr, - int vif, int create) + int vif) { struct inetpeer_addr daddr; daddr.a4.addr = v4daddr; daddr.a4.vif = vif; daddr.family = AF_INET; - return inet_getpeer(base, &daddr, create); + return inet_getpeer(base, &daddr); } static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base, - const struct in6_addr *v6daddr, - int create) + const struct in6_addr *v6daddr) { struct inetpeer_addr daddr; daddr.a6 = *v6daddr; daddr.family = AF_INET6; - return inet_getpeer(base, &daddr, create); + return inet_getpeer(base, &daddr); } static inline int inetpeer_addr_cmp(const struct inetpeer_addr *a, diff --git a/include/net/ip.h b/include/net/ip.h index 9d754c4a5300..4ee23eb0814a 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -409,6 +409,11 @@ int ip_decrease_ttl(struct iphdr *iph) return --iph->ttl; } +static inline dscp_t ip4h_dscp(const struct iphdr *ip4h) +{ + return inet_dsfield_to_dscp(ip4h->tos); +} + static inline int ip_mtu_locked(const struct dst_entry *dst) { const struct rtable *rt = (const struct rtable *)dst; diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index f1ba369306fe..84751313b826 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -57,6 +57,13 @@ struct ip_tunnel_key { __u8 flow_flags; }; +struct ip_tunnel_encap { + u16 type; + u16 flags; + __be16 sport; + __be16 dport; +}; + /* Flags for ip_tunnel_info mode. 
*/ #define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */ #define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */ @@ -66,9 +73,9 @@ struct ip_tunnel_key { #define IP_TUNNEL_OPTS_MAX \ GENMASK((sizeof_field(struct ip_tunnel_info, \ options_len) * BITS_PER_BYTE) - 1, 0) - struct ip_tunnel_info { struct ip_tunnel_key key; + struct ip_tunnel_encap encap; #ifdef CONFIG_DST_CACHE struct dst_cache dst_cache; #endif @@ -86,13 +93,6 @@ struct ip_tunnel_6rd_parm { }; #endif -struct ip_tunnel_encap { - u16 type; - u16 flags; - __be16 sport; - __be16 dport; -}; - struct ip_tunnel_prl_entry { struct ip_tunnel_prl_entry __rcu *next; __be32 addr; @@ -293,6 +293,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, __be32 remote, __be32 local, __be32 key); +void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info); int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst, bool log_ecn_error); @@ -405,22 +406,23 @@ static inline int ip_encap_hlen(struct ip_tunnel_encap *e) return hlen; } -static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, +static inline int ip_tunnel_encap(struct sk_buff *skb, + struct ip_tunnel_encap *e, u8 *protocol, struct flowi4 *fl4) { const struct ip_tunnel_encap_ops *ops; int ret = -EINVAL; - if (t->encap.type == TUNNEL_ENCAP_NONE) + if (e->type == TUNNEL_ENCAP_NONE) return 0; - if (t->encap.type >= MAX_IPTUN_ENCAP_OPS) + if (e->type >= MAX_IPTUN_ENCAP_OPS) return -EINVAL; rcu_read_lock(); - ops = rcu_dereference(iptun_encaps[t->encap.type]); + ops = rcu_dereference(iptun_encaps[e->type]); if (likely(ops && ops->build_header)) - ret = ops->build_header(skb, &t->encap, protocol, fl4); + ret = ops->build_header(skb, e, protocol, fl4); rcu_read_unlock(); return ret; diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 031c661aa14d..bdfa9d414360 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -198,10 +198,12 @@ struct sk_buff *l3mdev_l3_out(struct sock *sk, struct sk_buff *skb, u16 proto) if (netif_is_l3_slave(dev)) { struct net_device *master; + rcu_read_lock(); master = netdev_master_upper_dev_get_rcu(dev); if (master && master->l3mdev_ops->l3mdev_l3_out) skb = master->l3mdev_ops->l3mdev_l3_out(master, sk, skb, proto); + rcu_read_unlock(); } return skb; diff --git a/include/net/lapb.h b/include/net/lapb.h index 124ee122f2c8..6c07420644e4 100644 --- a/include/net/lapb.h +++ b/include/net/lapb.h @@ -4,7 +4,7 @@ #include #include -#define LAPB_HEADER_LEN 20 /* LAPB over Ethernet + a bit more */ +#define LAPB_HEADER_LEN MAX_HEADER /* LAPB over Ethernet + a bit more */ #define LAPB_ACK_PENDING_CONDITION 0x01 #define LAPB_REJECT_CONDITION 0x02 diff --git a/include/net/mac80211.h b/include/net/mac80211.h index a5a5a2915736..6ef8348b5e93 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -2956,6 +2956,19 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, */ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); +/** + * ieee80211_purge_tx_queue - purge TX skb queue + * @hw: the hardware + * @skbs: the skbs + * + * Free a set of transmit skbs. Use this function when device is going to stop + * but some transmit skbs without TX status are still queued. + * This function does not take the list lock and the caller must hold the + * relevant locks to use it. 
+ */ +void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, + struct sk_buff_head *skbs); + /** * DOC: Hardware crypto acceleration * diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 8c3587d5c308..ee50d5493662 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -81,6 +81,7 @@ struct net { * or to unregister pernet ops * (pernet_ops_rwsem write locked). */ + struct llist_node defer_free_list; struct llist_node cleanup_list; /* namespaces on death row */ #ifdef CONFIG_KEYS @@ -352,21 +353,30 @@ static inline void put_net_track(struct net *net, netns_tracker *tracker) typedef struct { #ifdef CONFIG_NET_NS - struct net *net; + struct net __rcu *net; #endif } possible_net_t; static inline void write_pnet(possible_net_t *pnet, struct net *net) { #ifdef CONFIG_NET_NS - pnet->net = net; + rcu_assign_pointer(pnet->net, net); #endif } static inline struct net *read_pnet(const possible_net_t *pnet) { #ifdef CONFIG_NET_NS - return pnet->net; + return rcu_dereference_protected(pnet->net, true); +#else + return &init_net; +#endif +} + +static inline struct net *read_pnet_rcu(const possible_net_t *pnet) +{ +#ifdef CONFIG_NET_NS + return rcu_dereference(pnet->net); #else return &init_net; #endif @@ -425,6 +435,9 @@ struct pernet_operations { void (*pre_exit)(struct net *net); void (*exit)(struct net *net); void (*exit_batch)(struct list_head *net_exit_list); + /* Following method is called with RTNL held. */ + void (*exit_batch_rtnl)(struct list_head *net_exit_list, + struct list_head *dev_kill_list); unsigned int *id; size_t size; }; diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index 0855b60fba17..f642a87ea330 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h @@ -100,7 +100,7 @@ nf_ct_expect_find_get(struct net *net, struct nf_conntrack_expect * nf_ct_find_expectation(struct net *net, const struct nf_conntrack_zone *zone, - const struct nf_conntrack_tuple *tuple); + const struct nf_conntrack_tuple *tuple, bool unlink); void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, u32 portid, int report); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index c24b04235d91..7252a5aae069 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -706,15 +706,18 @@ struct nft_set_ext_tmpl { /** * struct nft_set_ext - set extensions * - * @genmask: generation mask + * @genmask: generation mask, but also flags (see NFT_SET_ELEM_DEAD_BIT) * @offset: offsets of individual extension types * @data: beginning of extension data + * + * This structure must be aligned to word size, otherwise atomic bitops + * on genmask field can cause alignment failure on some archs. 
*/ struct nft_set_ext { u8 genmask; u8 offset[NFT_SET_EXT_NUM]; char data[]; -}; +} __aligned(BITS_PER_LONG / 8); static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl) { @@ -1042,6 +1045,7 @@ struct nft_rule_blob { * @use: number of jump references to this chain * @flags: bitmask of enum nft_chain_flags * @name: name of the chain + * @rcu_head: rcu head for deferred release */ struct nft_chain { struct nft_rule_blob __rcu *blob_gen_0; @@ -1118,7 +1122,7 @@ static inline bool nft_chain_is_bound(struct nft_chain *chain) int nft_chain_add(struct nft_table *table, struct nft_chain *chain); void nft_chain_del(struct nft_chain *chain); -void nf_tables_chain_destroy(struct nft_ctx *ctx); +void nf_tables_chain_destroy(struct nft_chain *chain); struct nft_stats { u64 bytes; diff --git a/include/net/route.h b/include/net/route.h index af8431b25f80..cdca622c5c6f 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -203,12 +203,13 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src, const struct sk_buff *hint); static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, - u8 tos, struct net_device *devin) + dscp_t dscp, struct net_device *devin) { int err; rcu_read_lock(); - err = ip_route_input_noref(skb, dst, src, tos, devin); + err = ip_route_input_noref(skb, dst, src, inet_dscp_to_dsfield(dscp), + devin); if (!err) { skb_dst_force(skb); if (!skb_dst(skb)) @@ -324,8 +325,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4, __be32 dst, if (IS_ERR(rt)) return rt; ip_rt_put(rt); - flowi4_update_output(fl4, oif, fl4->flowi4_tos, fl4->daddr, - fl4->saddr); + flowi4_update_output(fl4, oif, fl4->daddr, fl4->saddr); } security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); return ip_route_output_flow(net, fl4, sk); @@ -340,8 +340,7 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable fl4->fl4_dport = dport; fl4->fl4_sport = sport; ip_rt_put(rt); - flowi4_update_output(fl4, sk->sk_bound_dev_if, - RT_CONN_FLAGS(sk), fl4->daddr, + flowi4_update_output(fl4, sk->sk_bound_dev_if, fl4->daddr, fl4->saddr); security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4)); return ip_route_output_flow(sock_net(sk), fl4, sk); @@ -362,10 +361,15 @@ static inline int inet_iif(const struct sk_buff *skb) static inline int ip4_dst_hoplimit(const struct dst_entry *dst) { int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT); - struct net *net = dev_net(dst->dev); - if (hoplimit == 0) + if (hoplimit == 0) { + const struct net *net; + + rcu_read_lock(); + net = dev_net_rcu(dst->dev); hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl); + rcu_read_unlock(); + } return hoplimit; } diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 743acbc43c85..b34e9e93a146 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -817,7 +817,7 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, } static inline void _bstats_update(struct gnet_stats_basic_sync *bstats, - __u64 bytes, __u32 packets) + __u64 bytes, __u64 packets) { u64_stats_update_begin(&bstats->syncp); u64_stats_add(&bstats->bytes, bytes); @@ -997,6 +997,21 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh) return skb; } +static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool direct) +{ + struct sk_buff *skb; + + skb = __skb_dequeue(&sch->gso_skb); + if (skb) { + sch->q.qlen--; + return skb; + } + if (direct) + return __qdisc_dequeue_head(&sch->q); + else + return 
sch->dequeue(sch); +} + static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch) { struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); diff --git a/include/net/scm.h b/include/net/scm.h index 585adc1346bd..0be0dc3eb1dc 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -21,9 +21,20 @@ struct scm_creds { kgid_t gid; }; +#ifdef CONFIG_UNIX +struct unix_edge; +#endif + struct scm_fp_list { short count; + short count_unix; short max; +#ifdef CONFIG_UNIX + bool inflight; + bool dead; + struct list_head vertices; + struct unix_edge *edges; +#endif struct user_struct *user; struct file *fp[SCM_MAX_FD]; }; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 00ee9eb601a6..8995914916ca 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -777,6 +777,7 @@ struct sctp_transport { /* Reference counting. */ refcount_t refcnt; + __u32 dead:1, /* RTO-Pending : A flag used to track if one of the DATA * chunks sent to this address is currently being * used to compute a RTT. If this flag is 0, @@ -786,7 +787,7 @@ struct sctp_transport { * calculation completes (i.e. the DATA chunk * is SACK'd) clear this flag. */ - __u32 rto_pending:1, + rto_pending:1, /* * hb_sent : a flag that signals that we have a pending diff --git a/include/net/sock.h b/include/net/sock.h index 1b39b62d44e2..e716b2ba00bb 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1660,7 +1660,7 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size) } static inline bool -sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) +__sk_rmem_schedule(struct sock *sk, int size, bool pfmemalloc) { int delta; @@ -1668,7 +1668,13 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) return true; delta = size - sk->sk_forward_alloc; return delta <= 0 || __sk_mem_schedule(sk, delta, SK_MEM_RECV) || - skb_pfmemalloc(skb); + pfmemalloc; +} + +static inline bool +sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) +{ + return __sk_rmem_schedule(sk, size, skb_pfmemalloc(skb)); } static inline int sk_unused_reserved_mem(const struct sock *sk) @@ -2246,7 +2252,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst) sk_tx_queue_clear(sk); WRITE_ONCE(sk->sk_dst_pending_confirm, 0); - old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); + old_dst = unrcu_pointer(xchg(&sk->sk_dst_cache, RCU_INITIALIZER(dst))); dst_release(old_dst); } diff --git a/include/net/strparser.h b/include/net/strparser.h index 41e2ce9e9e10..0a83010b3a64 100644 --- a/include/net/strparser.h +++ b/include/net/strparser.h @@ -43,6 +43,8 @@ struct strparser; struct strp_callbacks { int (*parse_msg)(struct strparser *strp, struct sk_buff *skb); void (*rcv_msg)(struct strparser *strp, struct sk_buff *skb); + int (*read_sock)(struct strparser *strp, read_descriptor_t *desc, + sk_read_actor_t recv_actor); int (*read_sock_done)(struct strparser *strp, int err); void (*abort_parser)(struct strparser *strp, int err); void (*lock)(struct strparser *strp); diff --git a/include/net/tcp.h b/include/net/tcp.h index a770210fda9b..83e0362e3b72 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -640,6 +641,19 @@ void tcp_fin(struct sock *sk); void tcp_check_space(struct sock *sk); void tcp_sack_compress_send_ack(struct sock *sk); +static inline void tcp_cleanup_skb(struct sk_buff *skb) +{ + skb_dst_drop(skb); + secpath_reset(skb); +} + +static inline void tcp_add_receive_queue(struct sock *sk, 
struct sk_buff *skb) +{ + DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); + DEBUG_NET_WARN_ON_ONCE(secpath_exists(skb)); + __skb_queue_tail(&sk->sk_receive_queue, skb); +} + /* tcp_timer.c */ void tcp_init_xmit_timers(struct sock *); static inline void tcp_clear_xmit_timers(struct sock *sk) @@ -686,6 +700,9 @@ void tcp_get_info(struct sock *, struct tcp_info *); /* Read 'sendfile()'-style from a TCP socket */ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor); +int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor, bool noack, + u32 *copied_seq); int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor); struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off); void tcp_read_done(struct sock *sk, size_t len); @@ -2337,6 +2354,11 @@ struct sk_psock; struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock); int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore); void tcp_bpf_clone(const struct sock *sk, struct sock *newsk); +#ifdef CONFIG_BPF_STREAM_PARSER +struct strparser; +int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc, + sk_read_actor_t recv_actor); +#endif /* CONFIG_BPF_STREAM_PARSER */ #endif /* CONFIG_BPF_SYSCALL */ #ifdef CONFIG_INET diff --git a/include/net/xfrm.h b/include/net/xfrm.h index bf670929622d..64911162ab5f 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -212,7 +212,6 @@ struct xfrm_state { /* Data for encapsulator */ struct xfrm_encap_tmpl *encap; - struct sock __rcu *encap_sk; /* Data for care-of address */ xfrm_address_t *coaddr; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 558250900326..41eb2a7c9695 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2725,6 +2725,7 @@ struct ib_device { * It is a NULL terminated array. 
*/ const struct attribute_group *groups[4]; + u8 hw_stats_attr_index; u64 uverbs_cmd_mask; diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h index fe0512116958..555ea3d142a4 100644 --- a/include/rdma/uverbs_std_types.h +++ b/include/rdma/uverbs_std_types.h @@ -34,7 +34,7 @@ static inline void *_uobj_get_obj_read(struct ib_uobject *uobj) { if (IS_ERR(uobj)) - return NULL; + return ERR_CAST(uobj); return uobj->object; } #define uobj_get_obj_read(_object, _type, _id, _attrs) \ diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h index 9eb75683e012..cb2920c601e7 100644 --- a/include/rv/da_monitor.h +++ b/include/rv/da_monitor.h @@ -14,6 +14,7 @@ #include #include #include +#include #ifdef CONFIG_RV_REACTORS @@ -324,10 +325,13 @@ static inline struct da_monitor *da_get_monitor_##name(struct task_struct *tsk) static void da_monitor_reset_all_##name(void) \ { \ struct task_struct *g, *p; \ + int cpu; \ \ read_lock(&tasklist_lock); \ for_each_process_thread(g, p) \ da_monitor_reset_##name(da_get_monitor_##name(p)); \ + for_each_present_cpu(cpu) \ + da_monitor_reset_##name(da_get_monitor_##name(idle_task(cpu))); \ read_unlock(&tasklist_lock); \ } \ \ diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 9b5562f54548..99cfaa9347c9 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -941,7 +941,6 @@ struct ocelot_port { phy_interface_t phy_mode; - unsigned int ptp_skbs_in_flight; struct sk_buff_head tx_skbs; unsigned int trap_proto; @@ -949,7 +948,6 @@ struct ocelot_port { u16 mrp_ring_id; u8 ptp_cmd; - u8 ts_id; u8 index; diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h index c601a4598b0d..eb19668a06db 100644 --- a/include/soc/mscc/ocelot_vcap.h +++ b/include/soc/mscc/ocelot_vcap.h @@ -13,6 +13,7 @@ */ #define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream) ((upstream) << 16 | (port)) #define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port) (port) +#define OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port) ((ocelot)->num_phys_ports + (port)) #define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port) (port) #define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port) ((ocelot)->num_phys_ports + (port)) #define OCELOT_VCAP_IS2_MRP_TRAP(ocelot) ((ocelot)->num_phys_ports * 2) @@ -499,6 +500,7 @@ struct ocelot_vcap_key_vlan { struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */ enum ocelot_vcap_bit dei; /* DEI */ enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */ + enum ocelot_vcap_bit tpid; }; struct ocelot_vcap_key_etype { diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index bbb7805e85d8..4ca45d5895df 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h @@ -199,6 +199,7 @@ struct hda_codec { /* beep device */ struct hda_beep *beep; unsigned int beep_mode; + bool beep_just_power_on; /* widget capabilities cache */ u32 *wcaps; diff --git a/include/sound/pcm.h b/include/sound/pcm.h index a2a64f10f3b2..1a567a4c3681 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1434,6 +1434,8 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s #define snd_pcm_lib_mmap_iomem NULL #endif +void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime); + /** * snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer * @dma: DMA number diff --git a/include/sound/soc.h b/include/sound/soc.h index 108617cea9c6..d63ac6d9fbdc 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -1141,7 +1141,10 @@ void 
snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd); /* mixer control */ struct soc_mixer_control { - int min, max, platform_max; + /* Minimum and maximum specified as written to the hardware */ + int min, max; + /* Limited maximum value specified as presented through the control */ + int platform_max; int reg, rreg; unsigned int shift, rshift; unsigned int sign_bit; diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index 6a13220d2d27..155c495b89ea 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -21,6 +21,9 @@ #undef __get_bitmask #define __get_bitmask(field) (char *)__get_dynamic_array(field) +#undef __get_cpumask +#define __get_cpumask(field) (char *)__get_dynamic_array(field) + #undef __get_sockaddr #define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field)) @@ -40,6 +43,9 @@ #undef __get_rel_bitmask #define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field) +#undef __get_rel_cpumask +#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field) + #undef __get_rel_sockaddr #define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field)) diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 7a6c5a870d33..31847ccae493 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1847,7 +1847,7 @@ DECLARE_EVENT_CLASS(btrfs__prelim_ref, TP_PROTO(const struct btrfs_fs_info *fs_info, const struct prelim_ref *oldref, const struct prelim_ref *newref, u64 tree_size), - TP_ARGS(fs_info, newref, oldref, tree_size), + TP_ARGS(fs_info, oldref, newref, tree_size), TP_STRUCT__entry_btrfs( __field( u64, root_id ) diff --git a/include/trace/events/icmp.h b/include/trace/events/icmp.h new file mode 100644 index 000000000000..31559796949a --- /dev/null +++ b/include/trace/events/icmp.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM icmp + +#if !defined(_TRACE_ICMP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_ICMP_H + +#include +#include + +TRACE_EVENT(icmp_send, + + TP_PROTO(const struct sk_buff *skb, int type, int code), + + TP_ARGS(skb, type, code), + + TP_STRUCT__entry( + __field(const void *, skbaddr) + __field(int, type) + __field(int, code) + __array(__u8, saddr, 4) + __array(__u8, daddr, 4) + __field(__u16, sport) + __field(__u16, dport) + __field(unsigned short, ulen) + ), + + TP_fast_assign( + struct iphdr *iph = ip_hdr(skb); + struct udphdr *uh = udp_hdr(skb); + int proto_4 = iph->protocol; + __be32 *p32; + + __entry->skbaddr = skb; + __entry->type = type; + __entry->code = code; + + if (proto_4 != IPPROTO_UDP || (u8 *)uh < skb->head || + (u8 *)uh + sizeof(struct udphdr) + > skb_tail_pointer(skb)) { + __entry->sport = 0; + __entry->dport = 0; + __entry->ulen = 0; + } else { + __entry->sport = ntohs(uh->source); + __entry->dport = ntohs(uh->dest); + __entry->ulen = ntohs(uh->len); + } + + p32 = (__be32 *) __entry->saddr; + *p32 = iph->saddr; + + p32 = (__be32 *) __entry->daddr; + *p32 = iph->daddr; + ), + + TP_printk("icmp_send: type=%d, code=%d. 
From %pI4:%u to %pI4:%u ulen=%d skbaddr=%p", + __entry->type, __entry->code, + __entry->saddr, __entry->sport, __entry->daddr, + __entry->dport, __entry->ulen, __entry->skbaddr) +); + +#endif /* _TRACE_ICMP_H */ + +/* This part must be outside protection */ +#include + diff --git a/include/trace/events/oom.h b/include/trace/events/oom.h index 26a11e4a2c36..b799f3bcba82 100644 --- a/include/trace/events/oom.h +++ b/include/trace/events/oom.h @@ -7,6 +7,8 @@ #include #include +#define PG_COUNT_TO_KB(x) ((x) << (PAGE_SHIFT - 10)) + TRACE_EVENT(oom_score_adj_update, TP_PROTO(struct task_struct *task), @@ -72,19 +74,45 @@ TRACE_EVENT(reclaim_retry_zone, ); TRACE_EVENT(mark_victim, - TP_PROTO(int pid), + TP_PROTO(struct task_struct *task, uid_t uid), - TP_ARGS(pid), + TP_ARGS(task, uid), TP_STRUCT__entry( __field(int, pid) + __string(comm, task->comm) + __field(unsigned long, total_vm) + __field(unsigned long, anon_rss) + __field(unsigned long, file_rss) + __field(unsigned long, shmem_rss) + __field(uid_t, uid) + __field(unsigned long, pgtables) + __field(short, oom_score_adj) ), TP_fast_assign( - __entry->pid = pid; + __entry->pid = task->pid; + __assign_str(comm, task->comm); + __entry->total_vm = PG_COUNT_TO_KB(task->mm->total_vm); + __entry->anon_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_ANONPAGES)); + __entry->file_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_FILEPAGES)); + __entry->shmem_rss = PG_COUNT_TO_KB(get_mm_counter(task->mm, MM_SHMEMPAGES)); + __entry->uid = uid; + __entry->pgtables = mm_pgtables_bytes(task->mm) >> 10; + __entry->oom_score_adj = task->signal->oom_score_adj; ), - TP_printk("pid=%d", __entry->pid) + TP_printk("pid=%d comm=%s total-vm=%lukB anon-rss=%lukB file-rss:%lukB shmem-rss:%lukB uid=%u pgtables=%lukB oom_score_adj=%hd", + __entry->pid, + __get_str(comm), + __entry->total_vm, + __entry->anon_rss, + __entry->file_rss, + __entry->shmem_rss, + __entry->uid, + __entry->pgtables, + __entry->oom_score_adj + ) ); TRACE_EVENT(wake_reaper, diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ffe2679a13ce..b70f47a57bf6 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -328,8 +328,7 @@ TRACE_EVENT(rpc_request, { (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \ { (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \ { (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \ - { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }, \ - { (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" }) + { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }) DECLARE_EVENT_CLASS(rpc_task_running, diff --git a/include/trace/perf.h b/include/trace/perf.h index 5800d13146c3..8f3bf1e17707 100644 --- a/include/trace/perf.h +++ b/include/trace/perf.h @@ -21,6 +21,9 @@ #undef __get_bitmask #define __get_bitmask(field) (char *)__get_dynamic_array(field) +#undef __get_cpumask +#define __get_cpumask(field) (char *)__get_dynamic_array(field) + #undef __get_sockaddr #define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field)) @@ -41,6 +44,9 @@ #undef __get_rel_bitmask #define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field) +#undef __get_rel_cpumask +#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field) + #undef __get_rel_sockaddr #define __get_rel_sockaddr(field) ((struct sockaddr *)__get_rel_dynamic_array(field)) diff --git a/include/trace/stages/stage1_struct_define.h b/include/trace/stages/stage1_struct_define.h index 1b7bab60434c..69e0dae453bf 100644 --- a/include/trace/stages/stage1_struct_define.h +++ 
b/include/trace/stages/stage1_struct_define.h @@ -32,6 +32,9 @@ #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(char, item, -1) +#undef __cpumask +#define __cpumask(item) __dynamic_array(char, item, -1) + #undef __sockaddr #define __sockaddr(field, len) __dynamic_array(u8, field, len) @@ -47,6 +50,9 @@ #undef __rel_bitmask #define __rel_bitmask(item, nr_bits) __rel_dynamic_array(char, item, -1) +#undef __rel_cpumask +#define __rel_cpumask(item) __rel_dynamic_array(char, item, -1) + #undef __rel_sockaddr #define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage2_data_offsets.h b/include/trace/stages/stage2_data_offsets.h index 1b7a8f764fdd..469b6a64293d 100644 --- a/include/trace/stages/stage2_data_offsets.h +++ b/include/trace/stages/stage2_data_offsets.h @@ -38,6 +38,9 @@ #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) +#undef __cpumask +#define __cpumask(item) __dynamic_array(unsigned long, item, -1) + #undef __sockaddr #define __sockaddr(field, len) __dynamic_array(u8, field, len) @@ -53,5 +56,8 @@ #undef __rel_bitmask #define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1) +#undef __rel_cpumask +#define __rel_cpumask(item) __rel_dynamic_array(unsigned long, item, -1) + #undef __rel_sockaddr #define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h index e3b183e9d18e..816c2da7a46f 100644 --- a/include/trace/stages/stage3_trace_output.h +++ b/include/trace/stages/stage3_trace_output.h @@ -42,6 +42,9 @@ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ }) +#undef __get_cpumask +#define __get_cpumask(field) __get_bitmask(field) + #undef __get_rel_bitmask #define __get_rel_bitmask(field) \ ({ \ @@ -51,6 +54,9 @@ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \ }) +#undef __get_rel_cpumask +#define __get_rel_cpumask(field) __get_rel_bitmask(field) + #undef __get_sockaddr #define __get_sockaddr(field) ((struct sockaddr *)__get_dynamic_array(field)) @@ -113,6 +119,14 @@ trace_print_array_seq(p, array, count, el_size); \ }) +#undef __print_dynamic_array +#define __print_dynamic_array(array, el_size) \ + ({ \ + __print_array(__get_dynamic_array(array), \ + __get_dynamic_array_len(array) / (el_size), \ + (el_size)); \ + }) + #undef __print_hex_dump #define __print_hex_dump(prefix_str, prefix_type, \ rowsize, groupsize, buf, len, ascii) \ diff --git a/include/trace/stages/stage4_event_fields.h b/include/trace/stages/stage4_event_fields.h index fae467ccd0c3..b6f679ae21aa 100644 --- a/include/trace/stages/stage4_event_fields.h +++ b/include/trace/stages/stage4_event_fields.h @@ -47,6 +47,12 @@ #undef __bitmask #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1) +#undef __cpumask +#define __cpumask(item) { \ + .type = "__data_loc cpumask_t", .name = #item, \ + .size = 4, .align = 4, \ + .is_signed = 0, .filter_type = FILTER_OTHER }, + #undef __sockaddr #define __sockaddr(field, len) __dynamic_array(u8, field, len) @@ -65,5 +71,11 @@ #undef __rel_bitmask #define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1) +#undef __rel_cpumask +#define __rel_cpumask(item) { \ + .type = "__rel_loc cpumask_t", .name = #item, \ + .size = 4, .align = 4, \ + .is_signed = 0, .filter_type = FILTER_OTHER }, + #undef __rel_sockaddr #define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) diff --git 
a/include/trace/stages/stage5_get_offsets.h b/include/trace/stages/stage5_get_offsets.h index def36fbb8c5c..e30a13be46ba 100644 --- a/include/trace/stages/stage5_get_offsets.h +++ b/include/trace/stages/stage5_get_offsets.h @@ -95,10 +95,16 @@ #define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \ __bitmask_size_in_longs(nr_bits)) +#undef __cpumask +#define __cpumask(item) __bitmask(item, nr_cpumask_bits) + #undef __rel_bitmask #define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, \ __bitmask_size_in_longs(nr_bits)) +#undef __rel_cpumask +#define __rel_cpumask(item) __rel_bitmask(item, nr_cpumask_bits) + #undef __sockaddr #define __sockaddr(field, len) __dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage6_event_callback.h b/include/trace/stages/stage6_event_callback.h index 3c554a585320..49c32394b53f 100644 --- a/include/trace/stages/stage6_event_callback.h +++ b/include/trace/stages/stage6_event_callback.h @@ -57,6 +57,16 @@ #define __assign_bitmask(dst, src, nr_bits) \ memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) +#undef __cpumask +#define __cpumask(item) __dynamic_array(unsigned long, item, -1) + +#undef __get_cpumask +#define __get_cpumask(field) (char *)__get_dynamic_array(field) + +#undef __assign_cpumask +#define __assign_cpumask(dst, src) \ + memcpy(__get_cpumask(dst), (src), __bitmask_size_in_bytes(nr_cpumask_bits)) + #undef __sockaddr #define __sockaddr(field, len) __dynamic_array(u8, field, len) @@ -98,6 +108,16 @@ #define __assign_rel_bitmask(dst, src, nr_bits) \ memcpy(__get_rel_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits)) +#undef __rel_cpumask +#define __rel_cpumask(item) __rel_dynamic_array(unsigned long, item, -1) + +#undef __get_rel_cpumask +#define __get_rel_cpumask(field) (char *)__get_rel_dynamic_array(field) + +#undef __assign_rel_cpumask +#define __assign_rel_cpumask(dst, src) \ + memcpy(__get_rel_cpumask(dst), (src), __bitmask_size_in_bytes(nr_cpumask_bits)) + #undef __rel_sockaddr #define __rel_sockaddr(field, len) __rel_dynamic_array(u8, field, len) diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h index 8a7ec24c246d..5e3e43adb73e 100644 --- a/include/trace/stages/stage7_class_define.h +++ b/include/trace/stages/stage7_class_define.h @@ -13,13 +13,16 @@ #undef __get_dynamic_array_len #undef __get_str #undef __get_bitmask +#undef __get_cpumask #undef __get_sockaddr #undef __get_rel_dynamic_array #undef __get_rel_dynamic_array_len #undef __get_rel_str #undef __get_rel_bitmask +#undef __get_rel_cpumask #undef __get_rel_sockaddr #undef __print_array +#undef __print_dynamic_array #undef __print_hex_dump /* diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index c2f9cabf154d..fa0d51cad57a 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -244,6 +244,9 @@ static struct trace_event_fields trace_event_fields_##call[] = { \ tstruct \ {} }; +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS + #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) @@ -374,11 +377,11 @@ static inline notrace int trace_event_get_offsets_##call( \ #include "stages/stage6_event_callback.h" -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ - \ + +#undef __DECLARE_EVENT_CLASS +#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace void \ 
-trace_event_raw_event_##call(void *__data, proto) \ +do_trace_event_raw_event_##call(void *__data, proto) \ { \ struct trace_event_file *trace_file = __data; \ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ @@ -403,6 +406,29 @@ trace_event_raw_event_##call(void *__data, proto) \ \ trace_event_buffer_commit(&fbuffer); \ } + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \ + PARAMS(assign), PARAMS(print)) \ +static notrace void \ +trace_event_raw_event_##call(void *__data, proto) \ +{ \ + do_trace_event_raw_event_##call(__data, args); \ +} + +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \ +__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \ + PARAMS(assign), PARAMS(print)) \ +static notrace void \ +trace_event_raw_event_##call(void *__data, proto) \ +{ \ + preempt_disable_notrace(); \ + do_trace_event_raw_event_##call(__data, args); \ + preempt_enable_notrace(); \ +} + /* * The ftrace_test_probe is compiled out, it is only here as a build time check * to make sure that if the tracepoint handling changes, the ftrace probe will @@ -418,6 +444,8 @@ static inline void ftrace_test_probe_##call(void) \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef __DECLARE_EVENT_CLASS + #include "stages/stage7_class_define.h" #undef DECLARE_EVENT_CLASS diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index c7b056af9ef0..3e5f07be26fc 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -91,7 +91,7 @@ typedef __s64 Elf64_Sxword; #define DT_INIT 12 #define DT_FINI 13 #define DT_SONAME 14 -#define DT_RPATH 15 +#define DT_RPATH 15 #define DT_SYMBOLIC 16 #define DT_REL 17 #define DT_RELSZ 18 diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 1ce8a91349e9..f410c22e080d 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -519,6 +519,7 @@ #define KEY_NOTIFICATION_CENTER 0x1bc /* Show/hide the notification center */ #define KEY_PICKUP_PHONE 0x1bd /* Answer incoming call */ #define KEY_HANGUP_PHONE 0x1be /* Decline incoming call */ +#define KEY_LINK_PHONE 0x1bf /* AL Phone Syncing */ #define KEY_DEL_EOL 0x1c0 #define KEY_DEL_EOS 0x1c1 diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 9eff86acdfec..8f8e39cd183e 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -379,6 +379,7 @@ enum { #define IORING_OFF_SQ_RING 0ULL #define IORING_OFF_CQ_RING 0x8000000ULL #define IORING_OFF_SQES 0x10000000ULL +#define IORING_OFF_MMAP_MASK 0xf8000000ULL /* * Filled with the offset for mmap(2) diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 42b60198b6c5..deed930ed305 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -55,6 +55,8 @@ struct kfd_ioctl_get_version_args { #define KFD_MAX_QUEUE_PERCENTAGE 100 #define KFD_MAX_QUEUE_PRIORITY 15 +#define KFD_MIN_QUEUE_RING_SIZE 1024 + struct kfd_ioctl_create_queue_args { __u64 ring_base_address; /* to KFD */ __u64 write_pointer_address; /* from KFD */ diff --git a/include/uapi/linux/landlock.h b/include/uapi/linux/landlock.h index 9c4bcc37a455..17bdfaa9eb87 100644 --- a/include/uapi/linux/landlock.h +++ b/include/uapi/linux/landlock.h @@ -38,9 +38,11 @@ struct landlock_ruleset_attr { * * - 
%LANDLOCK_CREATE_RULESET_VERSION: Get the highest supported Landlock ABI * version. + * - %LANDLOCK_CREATE_RULESET_ERRATA: Get a bitmask of fixed issues. */ /* clang-format off */ #define LANDLOCK_CREATE_RULESET_VERSION (1U << 0) +#define LANDLOCK_CREATE_RULESET_ERRATA (1U << 1) /* clang-format on */ /** diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 51c13cf9c5ae..63a0922937e7 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -174,7 +174,7 @@ enum { #define RTM_GETLINKPROP RTM_GETLINKPROP RTM_NEWVLAN = 112, -#define RTM_NEWNVLAN RTM_NEWVLAN +#define RTM_NEWVLAN RTM_NEWVLAN RTM_DELVLAN, #define RTM_DELVLAN RTM_DELVLAN RTM_GETVLAN, diff --git a/include/uapi/linux/stddef.h b/include/uapi/linux/stddef.h index dcd50fb2164a..ef892cb1cbb7 100644 --- a/include/uapi/linux/stddef.h +++ b/include/uapi/linux/stddef.h @@ -8,6 +8,13 @@ #define __always_inline inline #endif +/* Not all C++ standards support type declarations inside an anonymous union */ +#ifndef __cplusplus +#define __struct_group_tag(TAG) TAG +#else +#define __struct_group_tag(TAG) +#endif + /** * __struct_group() - Create a mirrored named and anonyomous struct * @@ -20,13 +27,13 @@ * and size: one anonymous and one named. The former's members can be used * normally without sub-struct naming, and the latter can be used to * reason about the start, end, and size of the group of struct members. - * The named struct can also be explicitly tagged for layer reuse, as well - * as both having struct attributes appended. + * The named struct can also be explicitly tagged for layer reuse (C only), + * as well as both having struct attributes appended. */ #define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \ union { \ struct { MEMBERS } ATTRS; \ - struct TAG { MEMBERS } ATTRS NAME; \ + struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \ } ATTRS /** diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h index 308433be33c2..c39147653094 100644 --- a/include/uapi/linux/types.h +++ b/include/uapi/linux/types.h @@ -49,6 +49,7 @@ typedef __u32 __bitwise __wsum; * No conversions are necessary between 32-bit user-space and a 64-bit kernel. */ #define __aligned_u64 __u64 __attribute__((aligned(8))) +#define __aligned_s64 __s64 __attribute__((aligned(8))) #define __aligned_be64 __be64 __attribute__((aligned(8))) #define __aligned_le64 __le64 __attribute__((aligned(8))) diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index 1bba3fead2ce..2a4a6b7b77bb 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -347,8 +347,8 @@ enum { /* Possible values for dExtendedUFSFeaturesSupport */ enum { - UFS_DEV_LOW_TEMP_NOTIF = BIT(4), - UFS_DEV_HIGH_TEMP_NOTIF = BIT(5), + UFS_DEV_HIGH_TEMP_NOTIF = BIT(4), + UFS_DEV_LOW_TEMP_NOTIF = BIT(5), UFS_DEV_EXT_TEMP_NOTIF = BIT(6), UFS_DEV_HPB_SUPPORT = BIT(7), UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index b54f22840dab..0a5637406485 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -272,6 +272,8 @@ struct ufs_pwr_mode_info { * @name: variant name * @init: called when the driver is initialized * @exit: called to cleanup everything done in init + * @set_dma_mask: For setting another DMA mask than indicated by the 64AS + * capability bit. 
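As a hedged illustration of the set_dma_mask variant op just described (which, together with the removal of UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS further below, lets a platform glue driver override the mask implied by the 64AS capability bit), a hypothetical driver might provide something like the following; the names are invented for the sketch and do not come from the patch.

	#include <linux/dma-mapping.h>
	#include <ufs/ufshcd.h>

	/* Controller advertises 64AS but only 32-bit DMA actually works. */
	static int example_ufs_set_dma_mask(struct ufs_hba *hba)
	{
		return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
	}

	static const struct ufs_hba_variant_ops example_ufs_vops = {
		.name		= "example-ufs",
		.set_dma_mask	= example_ufs_set_dma_mask,
	};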
* @get_ufs_hci_version: called to get UFS HCI version * @clk_scale_notify: notifies that clks are scaled up/down * @setup_clocks: called before touching any of the controller registers @@ -281,7 +283,9 @@ struct ufs_pwr_mode_info { * to allow variant specific Uni-Pro initialization. * @pwr_change_notify: called before and after a power mode change * is carried out to allow vendor spesific capabilities - * to be set. + * to be set. PRE_CHANGE can modify final_params based + * on desired_pwr_mode, but POST_CHANGE must not alter + * the final_params parameter * @setup_xfer_req: called before any transfer request is issued * to set some things * @setup_task_mgmt: called before any task management request is issued @@ -303,6 +307,7 @@ struct ufs_hba_variant_ops { int (*init)(struct ufs_hba *); void (*exit)(struct ufs_hba *); u32 (*get_ufs_hci_version)(struct ufs_hba *); + int (*set_dma_mask)(struct ufs_hba *); int (*clk_scale_notify)(struct ufs_hba *, bool, enum ufs_notify_change_status); int (*setup_clocks)(struct ufs_hba *, bool, @@ -312,9 +317,9 @@ struct ufs_hba_variant_ops { int (*link_startup_notify)(struct ufs_hba *, enum ufs_notify_change_status); int (*pwr_change_notify)(struct ufs_hba *, - enum ufs_notify_change_status status, - struct ufs_pa_layer_attr *, - struct ufs_pa_layer_attr *); + enum ufs_notify_change_status status, + struct ufs_pa_layer_attr *desired_pwr_mode, + struct ufs_pa_layer_attr *final_params); void (*setup_xfer_req)(struct ufs_hba *hba, int tag, bool is_scsi_cmd); void (*setup_task_mgmt)(struct ufs_hba *, int, u8); @@ -582,12 +587,6 @@ enum ufshcd_quirks { */ UFSHCD_QUIRK_SKIP_PH_CONFIGURATION = 1 << 16, - /* - * This quirk needs to be enabled if the host controller has - * 64-bit addressing supported capability but it doesn't work. - */ - UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 1 << 17, - /* * This quirk needs to be enabled if the host controller has * auto-hibernate capability but it's FASTAUTO only. diff --git a/include/xen/interface/xen-mca.h b/include/xen/interface/xen-mca.h index 464aa6b3a5f9..1c9afbe8cc26 100644 --- a/include/xen/interface/xen-mca.h +++ b/include/xen/interface/xen-mca.h @@ -372,7 +372,7 @@ struct xen_mce { #define XEN_MCE_LOG_LEN 32 struct xen_mce_log { - char signature[12]; /* "MACHINECHECK" */ + char signature[12] __nonstring; /* "MACHINECHECK" */ unsigned len; /* = XEN_MCE_LOG_LEN */ unsigned next; unsigned flags; diff --git a/init/Kconfig b/init/Kconfig index 4699d4772161..ec8afe2f5ed4 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -678,7 +678,7 @@ endmenu # "CPU/Task time and stats accounting" config CPU_ISOLATION bool "CPU isolation" - depends on SMP || COMPILE_TEST + depends on SMP default y help Make sure that CPUs running critical tasks are not disturbed by @@ -1934,7 +1934,7 @@ config RUST depends on !GCC_PLUGINS depends on !RANDSTRUCT depends on !SHADOW_CALL_STACK - depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE + depends on !DEBUG_INFO_BTF || (PAHOLE_HAS_LANG_EXCLUDE && !LTO) help Enables Rust support in the kernel. 
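One change above that a short example may clarify: the __nonstring annotation on xen_mce_log's signature field marks it as a fixed-size byte pattern rather than a NUL-terminated C string, so consumers should inspect it with length-bounded helpers such as memcmp() instead of str*() functions. The checker below is purely illustrative and is not part of the patch.

	#include <linux/string.h>
	#include <linux/types.h>
	#include <xen/interface/xen-mca.h>

	/* Hypothetical validity check: compare all 12 bytes, no NUL expected. */
	static bool xen_mce_log_valid(const struct xen_mce_log *log)
	{
		return !memcmp(log->signature, "MACHINECHECK",
			       sizeof(log->signature));
	}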
diff --git a/init/initramfs.c b/init/initramfs.c index 0346f95f0774..4905560cb023 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -365,6 +365,15 @@ static int __init do_name(void) { state = SkipIt; next_state = Reset; + + /* name_len > 0 && name_len <= PATH_MAX checked in do_header */ + if (collected[name_len - 1] != '\0') { + pr_err("initramfs name without nulterm: %.*s\n", + (int)name_len, collected); + error("malformed archive"); + return 1; + } + if (strcmp(collected, "TRAILER!!!") == 0) { free_hash(); return 0; @@ -429,6 +438,12 @@ static int __init do_copy(void) static int __init do_symlink(void) { + if (collected[name_len - 1] != '\0') { + pr_err("initramfs symlink without nulterm: %.*s\n", + (int)name_len, collected); + error("malformed archive"); + return 1; + } collected[N_ALIGN(name_len) + body_len] = '\0'; clean_path(collected, 0); init_symlink(collected + N_ALIGN(name_len), collected); diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c index ea2c2ded4e41..4a5053169977 100644 --- a/io_uring/fdinfo.c +++ b/io_uring/fdinfo.c @@ -79,11 +79,11 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, seq_printf(m, "SqMask:\t0x%x\n", sq_mask); seq_printf(m, "SqHead:\t%u\n", sq_head); seq_printf(m, "SqTail:\t%u\n", sq_tail); - seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head); + seq_printf(m, "CachedSqHead:\t%u\n", data_race(ctx->cached_sq_head)); seq_printf(m, "CqMask:\t0x%x\n", cq_mask); seq_printf(m, "CqHead:\t%u\n", cq_head); seq_printf(m, "CqTail:\t%u\n", cq_tail); - seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail); + seq_printf(m, "CachedCqTail:\t%u\n", data_race(ctx->cached_cq_tail)); seq_printf(m, "SQEs:\t%u\n", sq_tail - sq_head); sq_entries = min(sq_tail - sq_head, ctx->sq_entries); for (i = 0; i < sq_entries; i++) { diff --git a/io_uring/filetable.c b/io_uring/filetable.c index 4660cb89ea9f..a64b4df0ac9c 100644 --- a/io_uring/filetable.c +++ b/io_uring/filetable.c @@ -98,7 +98,7 @@ static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file, *io_get_tag_slot(ctx->file_data, slot_index) = 0; io_fixed_file_set(file_slot, file); io_file_bitmap_set(&ctx->file_table, slot_index); - return 0; + ret = 0; err: if (needs_switch) io_rsrc_node_switch(ctx, ctx->file_data); diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 4f0ae938b146..ad462724246a 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -71,6 +71,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -371,24 +372,6 @@ static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) return req->link; } -static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) -{ - if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) - return NULL; - return __io_prep_linked_timeout(req); -} - -static noinline void __io_arm_ltimeout(struct io_kiocb *req) -{ - io_queue_linked_timeout(__io_prep_linked_timeout(req)); -} - -static inline void io_arm_ltimeout(struct io_kiocb *req) -{ - if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT)) - __io_arm_ltimeout(req); -} - static void io_prep_async_work(struct io_kiocb *req) { const struct io_op_def *def = &io_op_defs[req->opcode]; @@ -434,13 +417,16 @@ static void io_prep_async_link(struct io_kiocb *req) } } -void io_queue_iowq(struct io_kiocb *req, bool *dont_use) +static void io_queue_iowq(struct io_kiocb *req) { - struct io_kiocb *link = io_prep_linked_timeout(req); struct io_uring_task *tctx = req->task->io_uring; BUG_ON(!tctx); - BUG_ON(!tctx->io_wq); + + if ((current->flags & 
PF_KTHREAD) || !tctx->io_wq) { + io_req_task_queue_fail(req, -ECANCELED); + return; + } /* init ->work of the whole link before punting */ io_prep_async_link(req); @@ -457,8 +443,6 @@ void io_queue_iowq(struct io_kiocb *req, bool *dont_use) trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work)); io_wq_enqueue(tctx->io_wq, &req->work); - if (link) - io_queue_linked_timeout(link); } static __cold void io_queue_deferred(struct io_ring_ctx *ctx) @@ -475,6 +459,13 @@ static __cold void io_queue_deferred(struct io_ring_ctx *ctx) } } +static void io_eventfd_free(struct rcu_head *rcu) +{ + struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu); + + eventfd_ctx_put(ev_fd->cq_ev_fd); + kfree(ev_fd); +} static void io_eventfd_ops(struct rcu_head *rcu) { @@ -488,10 +479,8 @@ static void io_eventfd_ops(struct rcu_head *rcu) * ordering in a race but if references are 0 we know we have to free * it regardless. */ - if (atomic_dec_and_test(&ev_fd->refs)) { - eventfd_ctx_put(ev_fd->cq_ev_fd); - kfree(ev_fd); - } + if (atomic_dec_and_test(&ev_fd->refs)) + call_rcu(&ev_fd->rcu, io_eventfd_free); } static void io_eventfd_signal(struct io_ring_ctx *ctx) @@ -582,6 +571,18 @@ static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx) io_cqring_ev_posted(ctx); } +static inline void __io_cq_unlock_post_flush(struct io_ring_ctx *ctx) + __releases(ctx->completion_lock) +{ + io_commit_cqring(ctx); + spin_unlock(&ctx->completion_lock); + io_commit_cqring_flush(ctx); + if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)) { + smp_mb(); + __io_cqring_wake(ctx); + } +} + void io_cq_unlock_post(struct io_ring_ctx *ctx) { __io_cq_unlock_post(ctx); @@ -626,6 +627,7 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) * to care for a non-real case. */ if (need_resched()) { + ctx->cqe_sentinel = ctx->cqe_cached; io_cq_unlock_post(ctx); mutex_unlock(&ctx->uring_lock); cond_resched(); @@ -818,6 +820,14 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, { bool filled; + /* + * If multishot has already posted deferred completions, ensure that + * those are flushed first before posting this one. If not, CQEs + * could get reordered. 
+ */ + if (!wq_list_empty(&ctx->submit_state.compl_reqs)) + __io_submit_flush_completions(ctx); + io_cq_lock(ctx); filled = io_fill_cqe_aux(ctx, user_data, res, cflags, allow_overflow); io_cq_unlock_post(ctx); @@ -1339,7 +1349,7 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx) if (!(req->flags & REQ_F_CQE_SKIP)) __io_fill_cqe_req(ctx, req); } - __io_cq_unlock_post(ctx); + __io_cq_unlock_post_flush(ctx); io_free_batch_list(ctx, state->compl_reqs.first); INIT_WQ_LIST(&state->compl_reqs); @@ -1603,6 +1613,7 @@ bool io_alloc_async_data(struct io_kiocb *req) int io_req_prep_async(struct io_kiocb *req) { const struct io_op_def *def = &io_op_defs[req->opcode]; + int ret; /* assign early for deferred execution for non-fixed file */ if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE) && !req->file) @@ -1615,7 +1626,9 @@ int io_req_prep_async(struct io_kiocb *req) if (io_alloc_async_data(req)) return -EAGAIN; } - return def->prep_async(req); + ret = def->prep_async(req); + io_kbuf_recycle(req, 0); + return ret; } static u32 io_get_sequence(struct io_kiocb *req) @@ -1716,17 +1729,24 @@ static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags) return !!req->file; } +#define REQ_ISSUE_SLOW_FLAGS (REQ_F_CREDS | REQ_F_ARM_LTIMEOUT) + static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) { const struct io_op_def *def = &io_op_defs[req->opcode]; const struct cred *creds = NULL; + struct io_kiocb *link = NULL; int ret; if (unlikely(!io_assign_file(req, issue_flags))) return -EBADF; - if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred())) - creds = override_creds(req->creds); + if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) { + if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) + creds = override_creds(req->creds); + if (req->flags & REQ_F_ARM_LTIMEOUT) + link = __io_prep_linked_timeout(req); + } if (!def->audit_skip) audit_uring_entry(req->opcode); @@ -1736,8 +1756,12 @@ static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) if (!def->audit_skip) audit_uring_exit(!ret, ret); - if (creds) - revert_creds(creds); + if (unlikely(creds || link)) { + if (creds) + revert_creds(creds); + if (link) + io_queue_linked_timeout(link); + } if (ret == IOU_OK) { if (issue_flags & IO_URING_F_COMPLETE_DEFER) @@ -1784,8 +1808,6 @@ void io_wq_submit_work(struct io_wq_work *work) else req_ref_get(req); - io_arm_ltimeout(req); - /* either cancelled or io-wq is dying, so don't touch tctx->iowq */ if (work->flags & IO_WQ_WORK_CANCEL) { fail: @@ -1883,15 +1905,11 @@ struct file *io_file_get_normal(struct io_kiocb *req, int fd) static void io_queue_async(struct io_kiocb *req, int ret) __must_hold(&req->ctx->uring_lock) { - struct io_kiocb *linked_timeout; - if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) { io_req_complete_failed(req, ret); return; } - linked_timeout = io_prep_linked_timeout(req); - switch (io_arm_poll_handler(req, 0)) { case IO_APOLL_READY: io_kbuf_recycle(req, 0); @@ -1899,14 +1917,11 @@ static void io_queue_async(struct io_kiocb *req, int ret) break; case IO_APOLL_ABORTED: io_kbuf_recycle(req, 0); - io_queue_iowq(req, NULL); + io_queue_iowq(req); break; case IO_APOLL_OK: break; } - - if (linked_timeout) - io_queue_linked_timeout(linked_timeout); } static inline void io_queue_sqe(struct io_kiocb *req) @@ -1920,9 +1935,7 @@ static inline void io_queue_sqe(struct io_kiocb *req) * We async punt it if the file wasn't marked NOWAIT, or if the file * doesn't support non-blocking read/write attempts */ - if 
(likely(!ret)) - io_arm_ltimeout(req); - else + if (unlikely(ret)) io_queue_async(req, ret); } @@ -1948,7 +1961,7 @@ static void io_queue_sqe_fallback(struct io_kiocb *req) if (unlikely(req->ctx->drain_active)) io_drain_req(req); else - io_queue_iowq(req, NULL); + io_queue_iowq(req); } } @@ -2489,23 +2502,118 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; } -static void io_mem_free(void *ptr) +static void io_pages_unmap(void *ptr, struct page ***pages, + unsigned short *npages) { - struct page *page; + bool do_vunmap = false; if (!ptr) return; - page = virt_to_head_page(ptr); - if (put_page_testzero(page)) - free_compound_page(page); + if (*npages) { + struct page **to_free = *pages; + int i; + + /* + * Only did vmap for the non-compound multiple page case. + * For the compound page, we just need to put the head. + */ + if (PageCompound(to_free[0])) + *npages = 1; + else if (*npages > 1) + do_vunmap = true; + for (i = 0; i < *npages; i++) + put_page(to_free[i]); + } + if (do_vunmap) + vunmap(ptr); + kvfree(*pages); + *pages = NULL; + *npages = 0; } -static void *io_mem_alloc(size_t size) +static void io_rings_free(struct io_ring_ctx *ctx) { - gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP; + io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages); + io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages); + ctx->rings = NULL; + ctx->sq_sqes = NULL; +} - return (void *) __get_free_pages(gfp, get_order(size)); +static void *io_mem_alloc_compound(struct page **pages, int nr_pages, + size_t size, gfp_t gfp) +{ + struct page *page; + int i, order; + + order = get_order(size); + if (order > 10) + return ERR_PTR(-ENOMEM); + else if (order) + gfp |= __GFP_COMP; + + page = alloc_pages(gfp, order); + if (!page) + return ERR_PTR(-ENOMEM); + + for (i = 0; i < nr_pages; i++) + pages[i] = page + i; + + return page_address(page); +} + +static void *io_mem_alloc_single(struct page **pages, int nr_pages, size_t size, + gfp_t gfp) +{ + void *ret; + int i; + + for (i = 0; i < nr_pages; i++) { + pages[i] = alloc_page(gfp); + if (!pages[i]) + goto err; + } + + ret = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); + if (ret) + return ret; +err: + while (i--) + put_page(pages[i]); + return ERR_PTR(-ENOMEM); +} + +static void *io_pages_map(struct page ***out_pages, unsigned short *npages, + size_t size) +{ + gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN; + struct page **pages; + int nr_pages; + void *ret; + + nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + pages = kvmalloc_array(nr_pages, sizeof(struct page *), gfp); + if (!pages) + return ERR_PTR(-ENOMEM); + + ret = io_mem_alloc_compound(pages, nr_pages, size, gfp); + if (!IS_ERR(ret)) + goto done; + if (nr_pages == 1) + goto fail; + + ret = io_mem_alloc_single(pages, nr_pages, size, gfp); + if (!IS_ERR(ret)) { +done: + *out_pages = pages; + *npages = nr_pages; + return ret; + } +fail: + kvfree(pages); + *out_pages = NULL; + *npages = 0; + return ret; } static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries, @@ -2656,8 +2764,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) mmdrop(ctx->mm_account); ctx->mm_account = NULL; } - io_mem_free(ctx->rings); - io_mem_free(ctx->sq_sqes); + io_rings_free(ctx); percpu_ref_exit(&ctx->refs); free_uid(ctx->user); @@ -3075,6 +3182,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd) void 
__io_uring_cancel(bool cancel_all) { + io_uring_unreg_ringfd(); io_uring_cancel_generic(cancel_all, NULL); } @@ -3089,11 +3197,9 @@ static void *io_uring_validate_mmap_request(struct file *file, switch (offset) { case IORING_OFF_SQ_RING: case IORING_OFF_CQ_RING: - ptr = ctx->rings; - break; + return ctx->rings; case IORING_OFF_SQES: - ptr = ctx->sq_sqes; - break; + return ctx->sq_sqes; default: return ERR_PTR(-EINVAL); } @@ -3105,11 +3211,23 @@ static void *io_uring_validate_mmap_request(struct file *file, return ptr; } +int io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma, + struct page **pages, int npages) +{ + unsigned long nr_pages = npages; + + vma->vm_flags |= VM_DONTEXPAND; + return vm_insert_pages(vma, vma->vm_start, pages, &nr_pages); +} + #ifdef CONFIG_MMU static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma) { + struct io_ring_ctx *ctx = file->private_data; size_t sz = vma->vm_end - vma->vm_start; + long offset = vma->vm_pgoff << PAGE_SHIFT; + unsigned int npages; unsigned long pfn; void *ptr; @@ -3117,6 +3235,16 @@ static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma) if (IS_ERR(ptr)) return PTR_ERR(ptr); + switch (offset & IORING_OFF_MMAP_MASK) { + case IORING_OFF_SQ_RING: + case IORING_OFF_CQ_RING: + npages = min(ctx->n_ring_pages, (sz + PAGE_SIZE - 1) >> PAGE_SHIFT); + return io_uring_mmap_pages(ctx, vma, ctx->ring_pages, npages); + case IORING_OFF_SQES: + return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages, + ctx->n_sqe_pages); + } + pfn = virt_to_phys(ptr) >> PAGE_SHIFT; return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); } @@ -3397,6 +3525,7 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, { struct io_rings *rings; size_t size, sq_array_offset; + void *ptr; /* make sure these are sane, as we already accounted them */ ctx->sq_entries = p->sq_entries; @@ -3406,9 +3535,9 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, if (size == SIZE_MAX) return -EOVERFLOW; - rings = io_mem_alloc(size); - if (!rings) - return -ENOMEM; + rings = io_pages_map(&ctx->ring_pages, &ctx->n_ring_pages, size); + if (IS_ERR(rings)) + return PTR_ERR(rings); ctx->rings = rings; ctx->sq_array = (u32 *)((char *)rings + sq_array_offset); @@ -3422,18 +3551,17 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, else size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); if (size == SIZE_MAX) { - io_mem_free(ctx->rings); - ctx->rings = NULL; + io_rings_free(ctx); return -EOVERFLOW; } - ctx->sq_sqes = io_mem_alloc(size); - if (!ctx->sq_sqes) { - io_mem_free(ctx->rings); - ctx->rings = NULL; - return -ENOMEM; + ptr = io_pages_map(&ctx->sqe_pages, &ctx->n_sqe_pages, size); + if (IS_ERR(ptr)) { + io_rings_free(ctx); + return PTR_ERR(ptr); } + ctx->sq_sqes = ptr; return 0; } diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h index 3b87f5421eb6..886921d2d58d 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -41,6 +41,8 @@ bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags void __io_commit_cqring_flush(struct io_ring_ctx *ctx); struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages); +int io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma, + struct page **pages, int npages); struct file *io_file_get_normal(struct io_kiocb *req, int fd); struct file *io_file_get_fixed(struct io_kiocb *req, int fd, @@ -54,7 +56,6 @@ static inline bool io_req_ffs_set(struct io_kiocb *req) 
void __io_req_task_work_add(struct io_kiocb *req, bool allow_local); bool io_alloc_async_data(struct io_kiocb *req); void io_req_task_queue(struct io_kiocb *req); -void io_queue_iowq(struct io_kiocb *req, bool *dont_use); void io_req_task_complete(struct io_kiocb *req, bool *locked); void io_req_task_queue_fail(struct io_kiocb *req, int ret); void io_req_task_submit(struct io_kiocb *req, bool *locked); diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index 55902303d7dc..147ada2ce747 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -336,6 +336,8 @@ int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe p->nbufs = tmp; p->addr = READ_ONCE(sqe->addr); p->len = READ_ONCE(sqe->len); + if (!p->len) + return -EINVAL; if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs, &size)) diff --git a/io_uring/net.c b/io_uring/net.c index f41acabf7b4a..898990e71367 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -282,7 +282,9 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req, if (unlikely(ret)) return ret; - return __get_compat_msghdr(&iomsg->msg, &cmsg, NULL); + ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL); + sr->msg_control = iomsg->msg.msg_control_user; + return ret; } #endif @@ -1389,6 +1391,8 @@ retry: goto retry; io_req_set_res(req, ret, 0); + if (!(issue_flags & IO_URING_F_MULTISHOT)) + return IOU_OK; return IOU_STOP_MULTISHOT; } @@ -1486,6 +1490,11 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags) io = &__io; } + if (unlikely(req->flags & REQ_F_FAIL)) { + ret = -ECONNRESET; + goto out; + } + file_flags = force_nonblock ? O_NONBLOCK : 0; ret = __sys_connect_file(req->file, &io->address, diff --git a/io_uring/poll.c b/io_uring/poll.c index a4084acaff91..bbdc9a7624a1 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -288,6 +288,8 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked) return IOU_POLL_REISSUE; } } + if (unlikely(req->cqe.res & EPOLLERR)) + req_set_fail(req); if (req->apoll_events & EPOLLONESHOT) return IOU_POLL_DONE; if (io_is_uring_fops(req->file)) @@ -305,6 +307,8 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked) } } else { int ret = io_poll_issue(req, locked); + io_kbuf_recycle(req, 0); + if (ret == IOU_STOP_MULTISHOT) return IOU_POLL_REMOVE_POLL_USE_RES; if (ret < 0) diff --git a/io_uring/rw.c b/io_uring/rw.c index 9d6e17a244ae..b75f62dccce6 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -167,12 +167,6 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) return NULL; } -static void io_req_task_queue_reissue(struct io_kiocb *req) -{ - req->io_task_work.func = io_queue_iowq; - io_req_task_work_add(req); -} - #ifdef CONFIG_BLOCK static bool io_resubmit_prep(struct io_kiocb *req) { @@ -341,7 +335,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret, if (req->flags & REQ_F_REISSUE) { req->flags &= ~REQ_F_REISSUE; if (io_resubmit_prep(req)) - io_req_task_queue_reissue(req); + return -EAGAIN; else io_req_task_queue_fail(req, final_ret); } @@ -691,7 +685,7 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode) return 0; } -int io_read(struct io_kiocb *req, unsigned int issue_flags) +static int __io_read(struct io_kiocb *req, unsigned int issue_flags) { struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); struct io_rw_state __s, *s = &__s; @@ -757,6 +751,14 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags) ret = io_iter_do_read(rw, &s->iter); + /* + * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT + * issue, 
even though they should be returning -EAGAIN. To be safe, + * retry from blocking context for either. + */ + if (ret == -EOPNOTSUPP && force_nonblock) + ret = -EAGAIN; + if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { req->flags &= ~REQ_F_REISSUE; /* if we can poll, just do that */ @@ -770,6 +772,8 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags) goto done; ret = 0; } else if (ret == -EIOCBQUEUED) { + req->flags |= REQ_F_PARTIAL_IO; + io_kbuf_recycle(req, issue_flags); if (iovec) kfree(iovec); return IOU_ISSUE_SKIP_COMPLETE; @@ -793,6 +797,9 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags) goto done; } + req->flags |= REQ_F_PARTIAL_IO; + io_kbuf_recycle(req, issue_flags); + io = req->async_data; s = &io->s; /* @@ -836,7 +843,18 @@ done: /* it's faster to check here then delegate to kfree */ if (iovec) kfree(iovec); - return kiocb_done(req, ret, issue_flags); + return ret; +} + +int io_read(struct io_kiocb *req, unsigned int issue_flags) +{ + int ret; + + ret = __io_read(req, issue_flags); + if (ret >= 0) + return kiocb_done(req, ret, issue_flags); + + return ret; } static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb) @@ -922,6 +940,11 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags) else ret2 = -EINVAL; + if (ret2 == -EIOCBQUEUED) { + req->flags |= REQ_F_PARTIAL_IO; + io_kbuf_recycle(req, issue_flags); + } + if (req->flags & REQ_F_REISSUE) { req->flags &= ~REQ_F_REISSUE; ret2 = -EAGAIN; diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index 3331daa0aae3..0d66bef60285 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -345,6 +345,7 @@ int io_sqpoll_wait_sq(struct io_ring_ctx *ctx) __cold int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p) { + struct task_struct *task_to_put = NULL; int ret; /* Retain compatibility with failing for an invalid attach attempt */ @@ -425,6 +426,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, } sqd->thread = tsk; + task_to_put = get_task_struct(tsk); ret = io_uring_alloc_task_context(tsk, ctx); wake_up_new_task(tsk); if (ret) @@ -435,11 +437,15 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, goto err; } + if (task_to_put) + put_task_struct(task_to_put); return 0; err_sqpoll: complete(&ctx->sq_data->exited); err: io_sq_thread_finish(ctx); + if (task_to_put) + put_task_struct(task_to_put); return ret; } diff --git a/io_uring/tctx.c b/io_uring/tctx.c index 4324b1cf1f6a..51b05fda8e45 100644 --- a/io_uring/tctx.c +++ b/io_uring/tctx.c @@ -47,8 +47,19 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx, void __io_uring_free(struct task_struct *tsk) { struct io_uring_task *tctx = tsk->io_uring; + struct io_tctx_node *node; + unsigned long index; - WARN_ON_ONCE(!xa_empty(&tctx->xa)); + /* + * Fault injection forcing allocation errors in the xa_store() path + * can lead to xa_empty() returning false, even though no actual + * node is stored in the xarray. Until that gets sorted out, attempt + * an iteration here and warn if any entries are found. 
+ */ + xa_for_each(&tctx->xa, index, node) { + WARN_ON_ONCE(1); + break; + } WARN_ON_ONCE(tctx->io_wq); WARN_ON_ONCE(tctx->cached_refs); diff --git a/ipc/namespace.c b/ipc/namespace.c index 8316ea585733..3075ca4920f1 100644 --- a/ipc/namespace.c +++ b/ipc/namespace.c @@ -68,13 +68,15 @@ static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns, err = msg_init_ns(ns); if (err) - goto fail_put; + goto fail_ipc; sem_init_ns(ns); shm_init_ns(ns); return ns; +fail_ipc: + retire_ipc_sysctls(ns); fail_mq: retire_mq_sysctls(ns); diff --git a/kernel/acct.c b/kernel/acct.c index 034a26daabb2..889fe13b24f6 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -104,48 +104,50 @@ struct bsd_acct_struct { atomic_long_t count; struct rcu_head rcu; struct mutex lock; - int active; + bool active; + bool check_space; unsigned long needcheck; struct file *file; struct pid_namespace *ns; struct work_struct work; struct completion done; + acct_t ac; }; -static void do_acct_process(struct bsd_acct_struct *acct); +static void fill_ac(struct bsd_acct_struct *acct); +static void acct_write_process(struct bsd_acct_struct *acct); /* * Check the amount of free space and suspend/resume accordingly. */ -static int check_free_space(struct bsd_acct_struct *acct) +static bool check_free_space(struct bsd_acct_struct *acct) { struct kstatfs sbuf; - if (time_is_after_jiffies(acct->needcheck)) - goto out; + if (!acct->check_space) + return acct->active; /* May block */ if (vfs_statfs(&acct->file->f_path, &sbuf)) - goto out; + return acct->active; if (acct->active) { u64 suspend = sbuf.f_blocks * SUSPEND; do_div(suspend, 100); if (sbuf.f_bavail <= suspend) { - acct->active = 0; + acct->active = false; pr_info("Process accounting paused\n"); } } else { u64 resume = sbuf.f_blocks * RESUME; do_div(resume, 100); if (sbuf.f_bavail >= resume) { - acct->active = 1; + acct->active = true; pr_info("Process accounting resumed\n"); } } acct->needcheck = jiffies + ACCT_TIMEOUT*HZ; -out: return acct->active; } @@ -190,7 +192,11 @@ static void acct_pin_kill(struct fs_pin *pin) { struct bsd_acct_struct *acct = to_acct(pin); mutex_lock(&acct->lock); - do_acct_process(acct); + /* + * Fill the accounting struct with the exiting task's info + * before punting to the workqueue. + */ + fill_ac(acct); schedule_work(&acct->work); wait_for_completion(&acct->done); cmpxchg(&acct->ns->bacct, pin, NULL); @@ -203,6 +209,9 @@ static void close_work(struct work_struct *work) { struct bsd_acct_struct *acct = container_of(work, struct bsd_acct_struct, work); struct file *file = acct->file; + + /* We were fired by acct_pin_kill() which holds acct->lock. */ + acct_write_process(acct); if (file->f_op->flush) file->f_op->flush(file, NULL); __fput_sync(file); @@ -235,6 +244,20 @@ static int acct_on(struct filename *pathname) return -EACCES; } + /* Exclude kernel kernel internal filesystems. */ + if (file_inode(file)->i_sb->s_flags & (SB_NOUSER | SB_KERNMOUNT)) { + kfree(acct); + filp_close(file, NULL); + return -EINVAL; + } + + /* Exclude procfs and sysfs. */ + if (file_inode(file)->i_sb->s_iflags & SB_I_USERNS_VISIBLE) { + kfree(acct); + filp_close(file, NULL); + return -EINVAL; + } + if (!(file->f_mode & FMODE_CAN_WRITE)) { kfree(acct); filp_close(file, NULL); @@ -431,13 +454,27 @@ static u32 encode_float(u64 value) * do_exit() or when switching to a different output file. 
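The create_ipc_ns() hunk above adds a dedicated fail_ipc label so that a failure in msg_init_ns() also retires the ipc sysctls registered just before it, keeping the unwind strictly in reverse order of initialization. A self-contained sketch of that goto-unwind shape, with hypothetical setup_*/undo_* steps standing in for the mq, ipc and msg initializers:

    #include <stdio.h>

    static int setup_mq(void)  { puts("mq sysctls up");       return 0; }
    static void undo_mq(void)  { puts("mq sysctls retired"); }
    static int setup_ipc(void) { puts("ipc sysctls up");      return 0; }
    static void undo_ipc(void) { puts("ipc sysctls retired"); }
    static int setup_msg(void) { puts("msg init");            return -1; /* simulate failure */ }

    static int create(void)
    {
        int err;

        err = setup_mq();
        if (err)
            return err;
        err = setup_ipc();
        if (err)
            goto fail_mq;
        err = setup_msg();
        if (err)
            goto fail_ipc;              /* unwind everything that succeeded so far */
        return 0;

    fail_ipc:
        undo_ipc();
    fail_mq:
        undo_mq();
        return err;
    }

    int main(void)
    {
        return create() ? 1 : 0;
    }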
*/ -static void fill_ac(acct_t *ac) +static void fill_ac(struct bsd_acct_struct *acct) { struct pacct_struct *pacct = ¤t->signal->pacct; + struct file *file = acct->file; + acct_t *ac = &acct->ac; u64 elapsed, run_time; time64_t btime; struct tty_struct *tty; + lockdep_assert_held(&acct->lock); + + if (time_is_after_jiffies(acct->needcheck)) { + acct->check_space = false; + + /* Don't fill in @ac if nothing will be written. */ + if (!acct->active) + return; + } else { + acct->check_space = true; + } + /* * Fill the accounting struct with the needed info as recorded * by the different kernel functions. @@ -485,64 +522,61 @@ static void fill_ac(acct_t *ac) ac->ac_majflt = encode_comp_t(pacct->ac_majflt); ac->ac_exitcode = pacct->ac_exitcode; spin_unlock_irq(¤t->sighand->siglock); -} -/* - * do_acct_process does all actual work. Caller holds the reference to file. - */ -static void do_acct_process(struct bsd_acct_struct *acct) -{ - acct_t ac; - unsigned long flim; - const struct cred *orig_cred; - struct file *file = acct->file; - /* - * Accounting records are not subject to resource limits. - */ - flim = rlimit(RLIMIT_FSIZE); - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; - /* Perform file operations on behalf of whoever enabled accounting */ - orig_cred = override_creds(file->f_cred); - - /* - * First check to see if there is enough free_space to continue - * the process accounting system. - */ - if (!check_free_space(acct)) - goto out; - - fill_ac(&ac); /* we really need to bite the bullet and change layout */ - ac.ac_uid = from_kuid_munged(file->f_cred->user_ns, orig_cred->uid); - ac.ac_gid = from_kgid_munged(file->f_cred->user_ns, orig_cred->gid); + ac->ac_uid = from_kuid_munged(file->f_cred->user_ns, current_uid()); + ac->ac_gid = from_kgid_munged(file->f_cred->user_ns, current_gid()); #if ACCT_VERSION == 1 || ACCT_VERSION == 2 /* backward-compatible 16 bit fields */ - ac.ac_uid16 = ac.ac_uid; - ac.ac_gid16 = ac.ac_gid; + ac->ac_uid16 = ac->ac_uid; + ac->ac_gid16 = ac->ac_gid; #elif ACCT_VERSION == 3 { struct pid_namespace *ns = acct->ns; - ac.ac_pid = task_tgid_nr_ns(current, ns); + ac->ac_pid = task_tgid_nr_ns(current, ns); rcu_read_lock(); - ac.ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), - ns); + ac->ac_ppid = task_tgid_nr_ns(rcu_dereference(current->real_parent), ns); rcu_read_unlock(); } #endif +} + +static void acct_write_process(struct bsd_acct_struct *acct) +{ + struct file *file = acct->file; + const struct cred *cred; + acct_t *ac = &acct->ac; + + /* Perform file operations on behalf of whoever enabled accounting */ + cred = override_creds(file->f_cred); + /* - * Get freeze protection. If the fs is frozen, just skip the write - * as we could deadlock the system otherwise. + * First check to see if there is enough free_space to continue + * the process accounting system. Then get freeze protection. If + * the fs is frozen, just skip the write as we could deadlock + * the system otherwise. */ - if (file_start_write_trylock(file)) { + if (check_free_space(acct) && file_start_write_trylock(file)) { /* it's been opened O_APPEND, so position is irrelevant */ loff_t pos = 0; - __kernel_write(file, &ac, sizeof(acct_t), &pos); + __kernel_write(file, ac, sizeof(acct_t), &pos); file_end_write(file); } -out: + + revert_creds(cred); +} + +static void do_acct_process(struct bsd_acct_struct *acct) +{ + unsigned long flim; + + /* Accounting records are not subject to resource limits. 
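The reworked kernel/acct.c above splits record handling in two: fill_ac() captures the exiting task's data into the preallocated acct->ac while acct->lock is held, and acct_write_process() performs the file write later (for example from close_work()) without needing the originating task. A rough user-space sketch of that fill-under-lock / write-later split, assuming only C and POSIX threads and using made-up names:

    #include <pthread.h>
    #include <stdio.h>

    struct acct_state {
        pthread_mutex_t lock;
        char record[64];        /* stands in for the acct_t embedded in the kernel struct */
        int have_record;
    };

    /* Caller must hold s->lock, mirroring lockdep_assert_held(&acct->lock). */
    static void fill_record(struct acct_state *s, const char *who)
    {
        snprintf(s->record, sizeof(s->record), "exit: %s", who);
        s->have_record = 1;
    }

    /* Runs later, e.g. from a worker thread; it only reads the prepared record. */
    static void write_record(struct acct_state *s, FILE *out)
    {
        if (s->have_record)
            fprintf(out, "%s\n", s->record);
    }

    int main(void)
    {
        struct acct_state s = { .lock = PTHREAD_MUTEX_INITIALIZER };

        pthread_mutex_lock(&s.lock);
        fill_record(&s, "task-123");    /* cheap: runs in the exiting task's context */
        pthread_mutex_unlock(&s.lock);

        write_record(&s, stdout);       /* slow I/O happens outside that context */
        return 0;
    }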
*/ + flim = rlimit(RLIMIT_FSIZE); + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; + fill_ac(acct); + acct_write_process(acct); current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; - revert_creds(orig_cred); } /** diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 0ea0d50a7c16..c281f5b8705e 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -523,6 +523,8 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt) { + int err; + /* Branch offsets can't overflow when program is shrinking, no need * to call bpf_adj_branches(..., true) here */ @@ -530,7 +532,9 @@ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt) sizeof(struct bpf_insn) * (prog->len - off - cnt)); prog->len -= cnt; - return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false)); + err = bpf_adj_branches(prog, off, off + cnt, off, false); + WARN_ON_ONCE(err); + return err; } static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) @@ -2117,6 +2121,7 @@ bool bpf_prog_map_compatible(struct bpf_map *map, { enum bpf_prog_type prog_type = resolve_prog_type(fp); bool ret; + struct bpf_prog_aux *aux = fp->aux; if (fp->kprobe_override) return false; @@ -2128,12 +2133,26 @@ bool bpf_prog_map_compatible(struct bpf_map *map, */ map->owner.type = prog_type; map->owner.jited = fp->jited; - map->owner.xdp_has_frags = fp->aux->xdp_has_frags; + map->owner.xdp_has_frags = aux->xdp_has_frags; + map->owner.attach_func_proto = aux->attach_func_proto; ret = true; } else { ret = map->owner.type == prog_type && map->owner.jited == fp->jited && - map->owner.xdp_has_frags == fp->aux->xdp_has_frags; + map->owner.xdp_has_frags == aux->xdp_has_frags; + if (ret && + map->owner.attach_func_proto != aux->attach_func_proto) { + switch (prog_type) { + case BPF_PROG_TYPE_TRACING: + case BPF_PROG_TYPE_LSM: + case BPF_PROG_TYPE_EXT: + case BPF_PROG_TYPE_STRUCT_OPS: + ret = false; + break; + default: + break; + } + } } spin_unlock(&map->owner.lock); diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 9699c30c3dc4..ac1d5dbc8918 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -184,7 +184,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) static void dev_map_free(struct bpf_map *map) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); - int i; + u32 i; /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, * so the programs (can be more than one that used this map) were @@ -807,7 +807,7 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); struct bpf_dtab_netdev *old_dev; - int k = *(u32 *)key; + u32 k = *(u32 *)key; if (k >= map->max_entries) return -EINVAL; @@ -822,7 +822,7 @@ static int dev_map_hash_delete_elem(struct bpf_map *map, void *key) { struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); struct bpf_dtab_netdev *old_dev; - int k = *(u32 *)key; + u32 k = *(u32 *)key; unsigned long flags; int ret = -ENOENT; diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index dae9ed02a75b..06bc7f26be06 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -2172,7 +2172,7 @@ static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_f b = &htab->buckets[i]; rcu_read_lock(); head = &b->head; - hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) { + hlist_nulls_for_each_entry_safe(elem, n, head, hash_node) { key = elem->key; if (is_percpu) { /* 
current cpu value for percpu map */ diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index a3fc4e2e8256..4fef0a015525 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -107,7 +107,7 @@ const struct bpf_func_proto bpf_map_pop_elem_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT, + .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE, }; BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) @@ -120,7 +120,7 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, - .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT, + .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE, }; BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu) @@ -531,7 +531,8 @@ const struct bpf_func_proto bpf_strtol_proto = { .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, + .arg4_size = sizeof(s64), }; BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, @@ -560,7 +561,8 @@ const struct bpf_func_proto bpf_strtoul_proto = { .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY, .arg2_type = ARG_CONST_SIZE, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, + .arg4_size = sizeof(u64), }; BPF_CALL_3(bpf_strncmp, const char *, s1, u32, s1_sz, const char *, s2) @@ -1500,7 +1502,7 @@ static const struct bpf_func_proto bpf_dynptr_from_mem_proto = { .arg1_type = ARG_PTR_TO_UNINIT_MEM, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT, + .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE, }; BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, struct bpf_dynptr_kern *, src, diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index d8ddb1e245d9..fd6e31e72290 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -302,12 +302,22 @@ static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie, return node; } +static int trie_check_add_elem(struct lpm_trie *trie, u64 flags) +{ + if (flags == BPF_EXIST) + return -ENOENT; + if (trie->n_entries == trie->map.max_entries) + return -ENOSPC; + trie->n_entries++; + return 0; +} + /* Called from syscall or from eBPF program */ static int trie_update_elem(struct bpf_map *map, void *_key, void *value, u64 flags) { struct lpm_trie *trie = container_of(map, struct lpm_trie, map); - struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL; + struct lpm_trie_node *node, *im_node, *new_node = NULL; struct lpm_trie_node *free_node = NULL; struct lpm_trie_node __rcu **slot; struct bpf_lpm_trie_key_u8 *key = _key; @@ -325,20 +335,12 @@ static int trie_update_elem(struct bpf_map *map, spin_lock_irqsave(&trie->lock, irq_flags); /* Allocate and fill a new node */ - - if (trie->n_entries == trie->map.max_entries) { - ret = -ENOSPC; - goto out; - } - new_node = lpm_trie_node_alloc(trie, value); if (!new_node) { ret = -ENOMEM; goto out; } - trie->n_entries++; - new_node->prefixlen = key->prefixlen; RCU_INIT_POINTER(new_node->child[0], NULL); RCU_INIT_POINTER(new_node->child[1], NULL); @@ -368,6 +370,10 @@ static int trie_update_elem(struct bpf_map *map, * simply assign the 
@new_node to that slot and be done. */ if (!node) { + ret = trie_check_add_elem(trie, flags); + if (ret) + goto out; + rcu_assign_pointer(*slot, new_node); goto out; } @@ -376,18 +382,30 @@ static int trie_update_elem(struct bpf_map *map, * which already has the correct data array set. */ if (node->prefixlen == matchlen) { + if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) { + if (flags == BPF_NOEXIST) { + ret = -EEXIST; + goto out; + } + } else { + ret = trie_check_add_elem(trie, flags); + if (ret) + goto out; + } + new_node->child[0] = node->child[0]; new_node->child[1] = node->child[1]; - if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) - trie->n_entries--; - rcu_assign_pointer(*slot, new_node); free_node = node; goto out; } + ret = trie_check_add_elem(trie, flags); + if (ret) + goto out; + /* If the new node matches the prefix completely, it must be inserted * as an ancestor. Simply insert it between @node and *@slot. */ @@ -400,6 +418,7 @@ static int trie_update_elem(struct bpf_map *map, im_node = lpm_trie_node_alloc(trie, NULL); if (!im_node) { + trie->n_entries--; ret = -ENOMEM; goto out; } @@ -421,14 +440,8 @@ static int trie_update_elem(struct bpf_map *map, rcu_assign_pointer(*slot, im_node); out: - if (ret) { - if (new_node) - trie->n_entries--; - + if (ret) kfree(new_node); - kfree(im_node); - } - spin_unlock_irqrestore(&trie->lock, irq_flags); kfree_rcu(free_node, rcu); @@ -628,7 +641,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) struct lpm_trie_node **node_stack = NULL; int err = 0, stack_ptr = -1; unsigned int next_bit; - size_t matchlen; + size_t matchlen = 0; /* The get_next_key follows postorder. For the 4 node example in * the top of this file, the trie_get_next_key() returns the following @@ -667,7 +680,7 @@ static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) next_bit = extract_bit(key->data, node->prefixlen); node = rcu_dereference(node->child[next_bit]); } - if (!node || node->prefixlen != key->prefixlen || + if (!node || node->prefixlen != matchlen || (node->flags & LPM_TREE_NODE_FLAG_IM)) goto find_leftmost; diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c index af75c54eb84f..095416e40df3 100644 --- a/kernel/bpf/ringbuf.c +++ b/kernel/bpf/ringbuf.c @@ -618,7 +618,7 @@ const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = { .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT, + .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT | MEM_WRITE, }; BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 42f5b37a74c6..b145f3ef3695 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -813,7 +813,7 @@ static const struct vm_operations_struct bpf_map_default_vmops = { static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) { struct bpf_map *map = filp->private_data; - int err; + int err = 0; if (!map->ops->map_mmap || map_value_has_spin_lock(map) || map_value_has_timer(map) || map_value_has_kptrs(map)) @@ -838,7 +838,12 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma) err = -EACCES; goto out; } + bpf_map_write_active_inc(map); } +out: + mutex_unlock(&map->freeze_mutex); + if (err) + return err; /* set default open/close callbacks */ vma->vm_ops = &bpf_map_default_vmops; @@ -849,13 +854,11 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct 
*vma) vma->vm_flags &= ~VM_MAYWRITE; err = map->ops->map_mmap(map, vma); - if (err) - goto out; + if (err) { + if (vma->vm_flags & VM_WRITE) + bpf_map_write_active_dec(map); + } - if (vma->vm_flags & VM_MAYWRITE) - bpf_map_write_active_inc(map); -out: - mutex_unlock(&map->freeze_mutex); return err; } @@ -1713,8 +1716,6 @@ int generic_map_update_batch(struct bpf_map *map, return err; } -#define MAP_LOOKUP_RETRIES 3 - int generic_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr, union bpf_attr __user *uattr) @@ -1724,8 +1725,8 @@ int generic_map_lookup_batch(struct bpf_map *map, void __user *values = u64_to_user_ptr(attr->batch.values); void __user *keys = u64_to_user_ptr(attr->batch.keys); void *buf, *buf_prevkey, *prev_key, *key, *value; - int err, retry = MAP_LOOKUP_RETRIES; u32 value_size, cp, max_count; + int err; if (attr->batch.elem_flags & ~BPF_F_LOCK) return -EINVAL; @@ -1771,14 +1772,8 @@ int generic_map_lookup_batch(struct bpf_map *map, err = bpf_map_copy_value(map, key, value, attr->batch.elem_flags); - if (err == -ENOENT) { - if (retry) { - retry--; - continue; - } - err = -EINTR; - break; - } + if (err == -ENOENT) + goto next_key; if (err) goto free_buf; @@ -1793,12 +1788,12 @@ int generic_map_lookup_batch(struct bpf_map *map, goto free_buf; } + cp++; +next_key: if (!prev_key) prev_key = buf_prevkey; swap(prev_key, key); - retry = MAP_LOOKUP_RETRIES; - cp++; cond_resched(); } @@ -2816,16 +2811,21 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) { const struct bpf_link *link = filp->private_data; const struct bpf_prog *prog = link->prog; + enum bpf_link_type type = link->type; char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; + if (type < ARRAY_SIZE(bpf_link_type_strs) && bpf_link_type_strs[type]) { + seq_printf(m, "link_type:\t%s\n", bpf_link_type_strs[type]); + } else { + WARN_ONCE(1, "missing BPF_LINK_TYPE(...) 
for link type %u\n", type); + seq_printf(m, "link_type:\t<%u>\n", type); + } + seq_printf(m, "link_id:\t%u\n", link->id); + bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); seq_printf(m, - "link_type:\t%s\n" - "link_id:\t%u\n" "prog_tag:\t%s\n" "prog_id:\t%u\n", - bpf_link_type_strs[link->type], - link->id, prog_tag, prog->aux->id); if (link->ops->show_fdinfo) @@ -4005,6 +4005,8 @@ static int bpf_prog_get_info_by_fd(struct file *file, info.recursion_misses = stats.misses; info.verified_insns = prog->aux->verified_insns; + if (prog->aux->btf) + info.btf_id = btf_obj_id(prog->aux->btf); if (!bpf_capable()) { info.jited_prog_len = 0; @@ -4151,8 +4153,6 @@ static int bpf_prog_get_info_by_fd(struct file *file, } } - if (prog->aux->btf) - info.btf_id = btf_obj_id(prog->aux->btf); info.attach_btf_id = prog->aux->attach_btf_id; if (attach_btf) info.attach_btf_obj_id = btf_obj_id(attach_btf); @@ -5260,7 +5260,8 @@ static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = { .arg1_type = ARG_PTR_TO_MEM, .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_ANYTHING, - .arg4_type = ARG_PTR_TO_LONG, + .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, + .arg4_size = sizeof(u64), }; static const struct bpf_func_proto * diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index da90f565317d..ead1811534a0 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5416,7 +5416,8 @@ mark: } static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, - int access_size, bool zero_size_allowed, + int access_size, enum bpf_access_type access_type, + bool zero_size_allowed, struct bpf_call_arg_meta *meta) { struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; @@ -5428,7 +5429,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, return check_packet_access(env, regno, reg->off, access_size, zero_size_allowed); case PTR_TO_MAP_KEY: - if (meta && meta->raw_mode) { + if (access_type == BPF_WRITE) { verbose(env, "R%d cannot write into %s\n", regno, reg_type_str(env, reg->type)); return -EACCES; @@ -5436,15 +5437,13 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, return check_mem_region_access(env, regno, reg->off, access_size, reg->map_ptr->key_size, false); case PTR_TO_MAP_VALUE: - if (check_map_access_type(env, regno, reg->off, access_size, - meta && meta->raw_mode ? BPF_WRITE : - BPF_READ)) + if (check_map_access_type(env, regno, reg->off, access_size, access_type)) return -EACCES; return check_map_access(env, regno, reg->off, access_size, zero_size_allowed, ACCESS_HELPER); case PTR_TO_MEM: if (type_is_rdonly_mem(reg->type)) { - if (meta && meta->raw_mode) { + if (access_type == BPF_WRITE) { verbose(env, "R%d cannot write into %s\n", regno, reg_type_str(env, reg->type)); return -EACCES; @@ -5455,7 +5454,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, zero_size_allowed); case PTR_TO_BUF: if (type_is_rdonly_mem(reg->type)) { - if (meta && meta->raw_mode) { + if (access_type == BPF_WRITE) { verbose(env, "R%d cannot write into %s\n", regno, reg_type_str(env, reg->type)); return -EACCES; @@ -5480,7 +5479,6 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, * Dynamically check it now. */ if (!env->ops->convert_ctx_access) { - enum bpf_access_type atype = meta && meta->raw_mode ? 
BPF_WRITE : BPF_READ; int offset = access_size - 1; /* Allow zero-byte read from PTR_TO_CTX */ @@ -5488,7 +5486,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, return zero_size_allowed ? 0 : -EACCES; return check_mem_access(env, env->insn_idx, regno, offset, BPF_B, - atype, -1, false); + access_type, -1, false); } fallthrough; @@ -5507,6 +5505,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, static int check_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, u32 regno, + enum bpf_access_type access_type, bool zero_size_allowed, struct bpf_call_arg_meta *meta) { @@ -5522,15 +5521,12 @@ static int check_mem_size_reg(struct bpf_verifier_env *env, */ meta->msize_max_value = reg->umax_value; - /* The register is SCALAR_VALUE; the access check - * happens using its boundaries. + /* The register is SCALAR_VALUE; the access check happens using + * its boundaries. For unprivileged variable accesses, disable + * raw mode so that the program is required to initialize all + * the memory that the helper could just partially fill up. */ if (!tnum_is_const(reg->var_off)) - /* For unprivileged variable accesses, disable raw - * mode so that the program is required to - * initialize all the memory that the helper could - * just partially fill up. - */ meta = NULL; if (reg->smin_value < 0) { @@ -5541,8 +5537,7 @@ static int check_mem_size_reg(struct bpf_verifier_env *env, if (reg->umin_value == 0) { err = check_helper_mem_access(env, regno - 1, 0, - zero_size_allowed, - meta); + access_type, zero_size_allowed, meta); if (err) return err; } @@ -5552,9 +5547,8 @@ static int check_mem_size_reg(struct bpf_verifier_env *env, regno); return -EACCES; } - err = check_helper_mem_access(env, regno - 1, - reg->umax_value, - zero_size_allowed, meta); + err = check_helper_mem_access(env, regno - 1, reg->umax_value, + access_type, zero_size_allowed, meta); if (!err) err = mark_chain_precision(env, regno); return err; @@ -5565,13 +5559,11 @@ int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, { bool may_be_null = type_may_be_null(reg->type); struct bpf_reg_state saved_reg; - struct bpf_call_arg_meta meta; int err; if (register_is_null(reg)) return 0; - memset(&meta, 0, sizeof(meta)); /* Assuming that the register contains a value check if the memory * access is safe. Temporarily save and restore the register's state as * the conversion shouldn't be visible to a caller. 
@@ -5581,10 +5573,8 @@ int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg, mark_ptr_not_null_reg(reg); } - err = check_helper_mem_access(env, regno, mem_size, true, &meta); - /* Check access for BPF_WRITE */ - meta.raw_mode = true; - err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta); + err = check_helper_mem_access(env, regno, mem_size, BPF_READ, true, NULL); + err = err ?: check_helper_mem_access(env, regno, mem_size, BPF_WRITE, true, NULL); if (may_be_null) *reg = saved_reg; @@ -5610,13 +5600,12 @@ int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state mark_ptr_not_null_reg(mem_reg); } - err = check_mem_size_reg(env, reg, regno, true, &meta); - /* Check access for BPF_WRITE */ - meta.raw_mode = true; - err = err ?: check_mem_size_reg(env, reg, regno, true, &meta); + err = check_mem_size_reg(env, reg, regno, BPF_READ, true, &meta); + err = err ?: check_mem_size_reg(env, reg, regno, BPF_WRITE, true, &meta); if (may_be_null) *mem_reg = saved_reg; + return err; } @@ -5818,16 +5807,6 @@ static bool arg_type_is_dynptr(enum bpf_arg_type type) return base_type(type) == ARG_PTR_TO_DYNPTR; } -static int int_ptr_type_to_size(enum bpf_arg_type type) -{ - if (type == ARG_PTR_TO_INT) - return sizeof(u32); - else if (type == ARG_PTR_TO_LONG) - return sizeof(u64); - - return -EINVAL; -} - static int resolve_map_arg_type(struct bpf_verifier_env *env, const struct bpf_call_arg_meta *meta, enum bpf_arg_type *arg_type) @@ -5908,16 +5887,6 @@ static const struct bpf_reg_types mem_types = { }, }; -static const struct bpf_reg_types int_ptr_types = { - .types = { - PTR_TO_STACK, - PTR_TO_PACKET, - PTR_TO_PACKET_META, - PTR_TO_MAP_KEY, - PTR_TO_MAP_VALUE, - }, -}; - static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } }; static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } }; static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } }; @@ -5955,8 +5924,6 @@ static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = { [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types, [ARG_PTR_TO_MEM] = &mem_types, [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types, - [ARG_PTR_TO_INT] = &int_ptr_types, - [ARG_PTR_TO_LONG] = &int_ptr_types, [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types, [ARG_PTR_TO_FUNC] = &func_ptr_types, [ARG_PTR_TO_STACK] = &stack_ptr_types, @@ -6249,9 +6216,8 @@ skip_type_check: verbose(env, "invalid map_ptr to access map->key\n"); return -EACCES; } - err = check_helper_mem_access(env, regno, - meta->map_ptr->key_size, false, - NULL); + err = check_helper_mem_access(env, regno, meta->map_ptr->key_size, + BPF_READ, false, NULL); break; case ARG_PTR_TO_MAP_VALUE: if (type_may_be_null(arg_type) && register_is_null(reg)) @@ -6266,9 +6232,9 @@ skip_type_check: return -EACCES; } meta->raw_mode = arg_type & MEM_UNINIT; - err = check_helper_mem_access(env, regno, - meta->map_ptr->value_size, false, - meta); + err = check_helper_mem_access(env, regno, meta->map_ptr->value_size, + arg_type & MEM_WRITE ? BPF_WRITE : BPF_READ, + false, meta); break; case ARG_PTR_TO_PERCPU_BTF_ID: if (!reg->btf_id) { @@ -6303,16 +6269,26 @@ skip_type_check: */ meta->raw_mode = arg_type & MEM_UNINIT; if (arg_type & MEM_FIXED_SIZE) { - err = check_helper_mem_access(env, regno, - fn->arg_size[arg], false, - meta); + err = check_helper_mem_access(env, regno, fn->arg_size[arg], + arg_type & MEM_WRITE ? 
BPF_WRITE : BPF_READ, + false, meta); + if (err) + return err; + if (arg_type & MEM_ALIGNED) + err = check_ptr_alignment(env, reg, 0, fn->arg_size[arg], true); } break; case ARG_CONST_SIZE: - err = check_mem_size_reg(env, reg, regno, false, meta); + err = check_mem_size_reg(env, reg, regno, + fn->arg_type[arg - 1] & MEM_WRITE ? + BPF_WRITE : BPF_READ, + false, meta); break; case ARG_CONST_SIZE_OR_ZERO: - err = check_mem_size_reg(env, reg, regno, true, meta); + err = check_mem_size_reg(env, reg, regno, + fn->arg_type[arg - 1] & MEM_WRITE ? + BPF_WRITE : BPF_READ, + true, meta); break; case ARG_PTR_TO_DYNPTR: /* We only need to check for initialized / uninitialized helper @@ -6373,17 +6349,6 @@ skip_type_check: if (err) return err; break; - case ARG_PTR_TO_INT: - case ARG_PTR_TO_LONG: - { - int size = int_ptr_type_to_size(arg_type); - - err = check_helper_mem_access(env, regno, size, false, meta); - if (err) - return err; - err = check_ptr_alignment(env, reg, 0, size, true); - break; - } case ARG_PTR_TO_CONST_STR: { struct bpf_map *map = reg->map_ptr; @@ -10381,8 +10346,11 @@ static void find_equal_scalars(struct bpf_verifier_state *vstate, struct bpf_reg_state *reg; bpf_for_each_reg_in_vstate(vstate, state, reg, ({ - if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) + if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) { + s32 saved_subreg_def = reg->subreg_def; copy_register_state(reg, known_reg); + reg->subreg_def = saved_subreg_def; + } })); } diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 2ca4aeb21a44..7997c8021b62 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -90,7 +90,7 @@ DEFINE_MUTEX(cgroup_mutex); DEFINE_SPINLOCK(css_set_lock); -#ifdef CONFIG_PROVE_RCU +#if (defined CONFIG_PROVE_RCU || defined CONFIG_LOCKDEP) EXPORT_SYMBOL_GPL(cgroup_mutex); EXPORT_SYMBOL_GPL(css_set_lock); #endif @@ -2119,8 +2119,10 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) if (ret) goto exit_stats; - ret = cgroup_bpf_inherit(root_cgrp); - WARN_ON_ONCE(ret); + if (root == &cgrp_dfl_root) { + ret = cgroup_bpf_inherit(root_cgrp); + WARN_ON_ONCE(ret); + } trace_cgroup_setup_root(root); @@ -2293,10 +2295,8 @@ static void cgroup_kill_sb(struct super_block *sb) * And don't kill the default root. 
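The verifier hunks above retire ARG_PTR_TO_INT/ARG_PTR_TO_LONG and instead describe the result pointer of helpers such as bpf_strtol() and bpf_strtoul() as fixed-size, aligned, written-to memory (MEM_UNINIT | MEM_WRITE | MEM_ALIGNED plus arg4_size). From the BPF program's side nothing changes as long as the destination is an 8-byte, naturally aligned slot. A minimal sketch, assuming libbpf's bpf_helpers.h and shown as a cgroup/sysctl program, one of the program types that can call this helper:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("cgroup/sysctl")
    int parse_number(struct bpf_sysctl *ctx)
    {
        char buf[] = "12345";
        long res = 0;           /* 8 bytes, naturally aligned on the BPF stack */

        /* The low bits of the flags argument select the base (0 auto-detects). */
        bpf_strtol(buf, sizeof(buf) - 1, 10, &res);
        return 1;               /* allow the sysctl access */
    }

    char LICENSE[] SEC("license") = "GPL";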
*/ if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root && - !percpu_ref_is_dying(&root->cgrp.self.refcnt)) { - cgroup_bpf_offline(&root->cgrp); + !percpu_ref_is_dying(&root->cgrp.self.refcnt)) percpu_ref_kill(&root->cgrp.self.refcnt); - } cgroup_put(&root->cgrp); kernfs_kill_sb(sb); } @@ -3952,7 +3952,7 @@ static void __cgroup_kill(struct cgroup *cgrp) lockdep_assert_held(&cgroup_mutex); spin_lock_irq(&css_set_lock); - set_bit(CGRP_KILL, &cgrp->flags); + cgrp->kill_seq++; spin_unlock_irq(&css_set_lock); css_task_iter_start(&cgrp->self, CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED, &it); @@ -3968,10 +3968,6 @@ static void __cgroup_kill(struct cgroup *cgrp) send_sig(SIGKILL, task, 0); } css_task_iter_end(&it); - - spin_lock_irq(&css_set_lock); - clear_bit(CGRP_KILL, &cgrp->flags); - spin_unlock_irq(&css_set_lock); } static void cgroup_kill(struct cgroup *cgrp) @@ -5628,9 +5624,11 @@ static struct cgroup *cgroup_create(struct cgroup *parent, const char *name, if (ret) goto out_kernfs_remove; - ret = cgroup_bpf_inherit(cgrp); - if (ret) - goto out_psi_free; + if (cgrp->root == &cgrp_dfl_root) { + ret = cgroup_bpf_inherit(cgrp); + if (ret) + goto out_psi_free; + } /* * New cgroup inherits effective freeze counter, and @@ -5948,7 +5946,8 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) cgroup1_check_for_release(parent); - cgroup_bpf_offline(cgrp); + if (cgrp->root == &cgrp_dfl_root) + cgroup_bpf_offline(cgrp); /* put the base reference */ percpu_ref_kill(&cgrp->self.refcnt); @@ -6406,6 +6405,10 @@ static int cgroup_css_set_fork(struct kernel_clone_args *kargs) spin_lock_irq(&css_set_lock); cset = task_css_set(current); get_css_set(cset); + if (kargs->cgrp) + kargs->kill_seq = kargs->cgrp->kill_seq; + else + kargs->kill_seq = cset->dfl_cgrp->kill_seq; spin_unlock_irq(&css_set_lock); if (!(kargs->flags & CLONE_INTO_CGROUP)) { @@ -6589,6 +6592,7 @@ void cgroup_post_fork(struct task_struct *child, struct kernel_clone_args *kargs) __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex) { + unsigned int cgrp_kill_seq = 0; unsigned long cgrp_flags = 0; bool kill = false; struct cgroup_subsys *ss; @@ -6602,10 +6606,13 @@ void cgroup_post_fork(struct task_struct *child, /* init tasks are special, only link regular threads */ if (likely(child->pid)) { - if (kargs->cgrp) + if (kargs->cgrp) { cgrp_flags = kargs->cgrp->flags; - else + cgrp_kill_seq = kargs->cgrp->kill_seq; + } else { cgrp_flags = cset->dfl_cgrp->flags; + cgrp_kill_seq = cset->dfl_cgrp->kill_seq; + } WARN_ON_ONCE(!list_empty(&child->cg_list)); cset->nr_tasks++; @@ -6640,7 +6647,7 @@ void cgroup_post_fork(struct task_struct *child, * child down right after we finished preparing it for * userspace. 
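The cgroup changes above and just below replace the transient CGRP_KILL bit with a monotonically increasing kill_seq: cgroup_css_set_fork() snapshots the sequence while preparing the child, and cgroup_post_fork() kills the child if the sequence moved in between, so a kill that starts and completes entirely within that window is no longer lost. A tiny self-contained illustration of the generation-counter idea; group, kill_group and fork_into are hypothetical names:

    #include <stdio.h>

    struct group { unsigned int kill_seq; };

    /* A "kill" only bumps the generation; it never needs to be cleared again. */
    static void kill_group(struct group *g)
    {
        g->kill_seq++;
    }

    static int fork_into(struct group *g)
    {
        unsigned int snapshot = g->kill_seq;    /* taken while preparing the child */

        kill_group(g);                          /* a racing kill, start to finish */

        /* post-fork: any change means a kill overlapped the fork window */
        return snapshot != g->kill_seq;
    }

    int main(void)
    {
        struct group g = { 0 };

        printf("child must be killed: %d\n", fork_into(&g));
        return 0;
    }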
*/ - kill = test_bit(CGRP_KILL, &cgrp_flags); + kill = kargs->kill_seq != cgrp_kill_seq; } spin_unlock_irq(&css_set_lock); diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index 7006fc8dd677..0ae90c15cad8 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -477,7 +477,6 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat) cputime->sum_exec_runtime += user; cputime->sum_exec_runtime += sys; - cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL]; #ifdef CONFIG_SCHED_CORE bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE]; diff --git a/kernel/cpu.c b/kernel/cpu.c index da61c2737e2e..c32b5a45e5e4 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1824,7 +1824,7 @@ static struct cpuhp_step cpuhp_hp_states[] = { }, [CPUHP_AP_HRTIMERS_DYING] = { .name = "hrtimers:dying", - .startup.single = NULL, + .startup.single = hrtimers_cpu_starting, .teardown.single = hrtimers_cpu_dying, }, diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index d545abe08087..95bccbe3495c 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -576,6 +576,8 @@ static void kdb_msg_write(const char *msg, int msg_len) continue; if (c == dbg_io_ops->cons) continue; + if (!c->write) + continue; /* * Set oops_in_progress to encourage the console drivers to * disregard their internal spin locks: in the current calling diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index d06c5982c2c0..c16889887ca1 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -70,8 +70,7 @@ EXPORT_SYMBOL_GPL(dma_contiguous_default_area); * Users, who want to set the size of global CMA area for their system * should use cma= kernel parameter. */ -static const phys_addr_t size_bytes __initconst = - (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M; +#define size_bytes ((phys_addr_t)CMA_SIZE_MBYTES * SZ_1M) static phys_addr_t size_cmdline __initdata = -1; static phys_addr_t base_cmdline __initdata; static phys_addr_t limit_cmdline __initdata; diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index de02c0808fb8..3e6f2a39beb3 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -1044,9 +1044,13 @@ static void check_unmap(struct dma_debug_entry *ref) } hash_bucket_del(entry); - dma_entry_free(entry); - put_hash_bucket(bucket, flags); + + /* + * Free the entry outside of bucket_lock to avoid ABBA deadlocks + * between that and radix_lock. + */ + dma_entry_free(entry); } static void check_for_stack(struct device *dev, diff --git a/kernel/events/core.c b/kernel/events/core.c index 4995eb68b78b..552bb00bfceb 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1180,6 +1180,12 @@ static void perf_event_ctx_deactivate(struct perf_event_context *ctx) list_del_init(&ctx->active_ctx_list); } +static inline void perf_pmu_read(struct perf_event *event) +{ + if (event->state == PERF_EVENT_STATE_ACTIVE) + event->pmu->read(event); +} + static void get_ctx(struct perf_event_context *ctx) { refcount_inc(&ctx->refcount); @@ -3389,8 +3395,7 @@ static void __perf_event_sync_stat(struct perf_event *event, * we know the event must be on the current CPU, therefore we * don't need to use it. 
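The check_unmap() change above unlocks the hash bucket before calling dma_entry_free(), because the free path can take radix_lock while other paths take the bucket lock with radix_lock already held, the classic ABBA pattern. The usual discipline is to detach the object under its lock, drop the lock, and only then do the work that needs other locks. A small user-space analogue, assuming POSIX threads and hypothetical names:

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    struct bucket {
        pthread_mutex_t lock;
        struct node *head;
    };

    static pthread_mutex_t other_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Like dma_entry_free(), freeing may take another lock of its own. */
    static void slow_free(struct node *n)
    {
        pthread_mutex_lock(&other_lock);
        free(n);
        pthread_mutex_unlock(&other_lock);
    }

    static void remove_one(struct bucket *b)
    {
        struct node *n;

        pthread_mutex_lock(&b->lock);
        n = b->head;                    /* detach while the bucket is locked */
        if (n)
            b->head = n->next;
        pthread_mutex_unlock(&b->lock);

        if (n)
            slow_free(n);               /* no bucket lock held: no ABBA ordering */
    }

    int main(void)
    {
        struct bucket b = { .lock = PTHREAD_MUTEX_INITIALIZER, .head = NULL };

        b.head = calloc(1, sizeof(struct node));
        remove_one(&b);
        return 0;
    }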
*/ - if (event->state == PERF_EVENT_STATE_ACTIVE) - event->pmu->read(event); + perf_pmu_read(event); perf_event_update_time(event); @@ -4444,15 +4449,8 @@ static void __perf_event_read(void *info) pmu->read(event); - for_each_sibling_event(sub, event) { - if (sub->state == PERF_EVENT_STATE_ACTIVE) { - /* - * Use sibling's PMU rather than @event's since - * sibling could be on different (eg: software) PMU. - */ - sub->pmu->read(sub); - } - } + for_each_sibling_event(sub, event) + perf_pmu_read(sub); data->ret = pmu->commit_txn(pmu); @@ -5679,14 +5677,15 @@ static int _perf_event_period(struct perf_event *event, u64 value) if (!value) return -EINVAL; - if (event->attr.freq && value > sysctl_perf_event_sample_rate) - return -EINVAL; - - if (perf_event_check_period(event, value)) - return -EINVAL; - - if (!event->attr.freq && (value & (1ULL << 63))) - return -EINVAL; + if (event->attr.freq) { + if (value > sysctl_perf_event_sample_rate) + return -EINVAL; + } else { + if (perf_event_check_period(event, value)) + return -EINVAL; + if (value & (1ULL << 63)) + return -EINVAL; + } event_function_call(event, __perf_event_period, &value); @@ -7100,9 +7099,8 @@ static void perf_output_read_group(struct perf_output_handle *handle, if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) values[n++] = running; - if ((leader != event) && - (leader->state == PERF_EVENT_STATE_ACTIVE)) - leader->pmu->read(leader); + if ((leader != event) && !handle->skip_read) + perf_pmu_read(leader); values[n++] = perf_event_count(leader); if (read_format & PERF_FORMAT_ID) @@ -7115,9 +7113,8 @@ static void perf_output_read_group(struct perf_output_handle *handle, for_each_sibling_event(sub, leader) { n = 0; - if ((sub != event) && - (sub->state == PERF_EVENT_STATE_ACTIVE)) - sub->pmu->read(sub); + if ((sub != event) && !handle->skip_read) + perf_pmu_read(sub); values[n++] = perf_event_count(sub); if (read_format & PERF_FORMAT_ID) @@ -7172,6 +7169,9 @@ void perf_output_sample(struct perf_output_handle *handle, { u64 sample_type = data->type; + if (data->sample_flags & PERF_SAMPLE_READ) + handle->skip_read = 1; + perf_output_put(handle, *header); if (sample_type & PERF_SAMPLE_IDENTIFIER) diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index c3797701339c..382a3b04f6d3 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -978,9 +978,10 @@ static int hw_breakpoint_event_init(struct perf_event *bp) return -ENOENT; /* - * no branch sampling for breakpoint events + * Check if breakpoint type is supported before proceeding. + * Also, no branch sampling for breakpoint events. 
*/ - if (has_branch_stack(bp)) + if (!hw_breakpoint_slots_cached(find_slot_idx(bp->attr.bp_type)) || has_branch_stack(bp)) return -EOPNOTSUPP; err = register_perf_hw_breakpoint(bp); diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 98588e96b591..4c4894de6e5d 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -19,7 +19,7 @@ static void perf_output_wakeup(struct perf_output_handle *handle) { - atomic_set(&handle->rb->poll, EPOLLIN); + atomic_set(&handle->rb->poll, EPOLLIN | EPOLLRDNORM); handle->event->pending_wakeup = 1; irq_work_queue(&handle->event->pending_irq); @@ -181,6 +181,7 @@ __perf_output_begin(struct perf_output_handle *handle, handle->rb = rb; handle->event = event; + handle->flags = 0; have_lost = local_read(&rb->lost); if (unlikely(have_lost)) { diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 9ee25351ceca..4c8fc82fc27a 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -484,6 +484,11 @@ retry: if (ret <= 0) goto put_old; + if (is_zero_page(old_page)) { + ret = -EINVAL; + goto put_old; + } + if (WARN(!is_register && PageCompound(old_page), "uprobe unregister should never work on compound page\n")) { ret = -EINVAL; @@ -1719,6 +1724,7 @@ void uprobe_free_utask(struct task_struct *t) if (!utask) return; + t->utask = NULL; if (utask->active_uprobe) put_uprobe(utask->active_uprobe); @@ -1728,7 +1734,6 @@ void uprobe_free_utask(struct task_struct *t) xol_free_insn_slot(t); kfree(utask); - t->utask = NULL; } /* diff --git a/kernel/fork.c b/kernel/fork.c index 09a935724bd9..8cc313d27188 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1943,6 +1943,91 @@ const struct file_operations pidfd_fops = { #endif }; +/** + * __pidfd_prepare - allocate a new pidfd_file and reserve a pidfd + * @pid: the struct pid for which to create a pidfd + * @flags: flags of the new @pidfd + * @pidfd: the pidfd to return + * + * Allocate a new file that stashes @pid and reserve a new pidfd number in the + * caller's file descriptor table. The pidfd is reserved but not installed yet. + + * The helper doesn't perform checks on @pid which makes it useful for pidfds + * created via CLONE_PIDFD where @pid has no task attached when the pidfd and + * pidfd file are prepared. + * + * If this function returns successfully the caller is responsible to either + * call fd_install() passing the returned pidfd and pidfd file as arguments in + * order to install the pidfd into its file descriptor table or they must use + * put_unused_fd() and fput() on the returned pidfd and pidfd file + * respectively. + * + * This function is useful when a pidfd must already be reserved but there + * might still be points of failure afterwards and the caller wants to ensure + * that no pidfd is leaked into its file descriptor table. + * + * Return: On success, a reserved pidfd is returned from the function and a new + * pidfd file is returned in the last argument to the function. On + * error, a negative error code is returned from the function and the + * last argument remains unchanged. 
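The uprobe_free_utask() hunk above clears t->utask before tearing the structure down rather than after kfree(), so exit-time code that inspects the task's utask pointer sees NULL instead of a structure whose members are in the middle of being released. The point is purely the ordering of the unpublish and the teardown; a bare sketch with hypothetical names (real concurrent readers would of course need their own synchronization):

    #include <stdlib.h>

    struct utask_like { int active; };
    struct task_like { struct utask_like *utask; };

    static void free_utask(struct task_like *t)
    {
        struct utask_like *u = t->utask;

        if (!u)
            return;
        t->utask = NULL;        /* unpublish first: observers of t->utask see NULL... */
        free(u);                /* ...never a structure in the middle of teardown */
    }

    int main(void)
    {
        struct task_like t = { calloc(1, sizeof(struct utask_like)) };

        free_utask(&t);
        return 0;
    }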
+ */ +static int __pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) +{ + int pidfd; + struct file *pidfd_file; + + if (flags & ~(O_NONBLOCK | O_RDWR | O_CLOEXEC)) + return -EINVAL; + + pidfd = get_unused_fd_flags(O_RDWR | O_CLOEXEC); + if (pidfd < 0) + return pidfd; + + pidfd_file = anon_inode_getfile("[pidfd]", &pidfd_fops, pid, + flags | O_RDWR | O_CLOEXEC); + if (IS_ERR(pidfd_file)) { + put_unused_fd(pidfd); + return PTR_ERR(pidfd_file); + } + get_pid(pid); /* held by pidfd_file now */ + *ret = pidfd_file; + return pidfd; +} + +/** + * pidfd_prepare - allocate a new pidfd_file and reserve a pidfd + * @pid: the struct pid for which to create a pidfd + * @flags: flags of the new @pidfd + * @pidfd: the pidfd to return + * + * Allocate a new file that stashes @pid and reserve a new pidfd number in the + * caller's file descriptor table. The pidfd is reserved but not installed yet. + * + * The helper verifies that @pid is used as a thread group leader. + * + * If this function returns successfully the caller is responsible to either + * call fd_install() passing the returned pidfd and pidfd file as arguments in + * order to install the pidfd into its file descriptor table or they must use + * put_unused_fd() and fput() on the returned pidfd and pidfd file + * respectively. + * + * This function is useful when a pidfd must already be reserved but there + * might still be points of failure afterwards and the caller wants to ensure + * that no pidfd is leaked into its file descriptor table. + * + * Return: On success, a reserved pidfd is returned from the function and a new + * pidfd file is returned in the last argument to the function. On + * error, a negative error code is returned from the function and the + * last argument remains unchanged. + */ +int pidfd_prepare(struct pid *pid, unsigned int flags, struct file **ret) +{ + if (!pid || !pid_has_task(pid, PIDTYPE_TGID)) + return -EINVAL; + + return __pidfd_prepare(pid, flags, ret); +} + static void __delayed_free_task(struct rcu_head *rhp) { struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); @@ -2293,21 +2378,12 @@ static __latent_entropy struct task_struct *copy_process( * if the fd table isn't shared). */ if (clone_flags & CLONE_PIDFD) { - retval = get_unused_fd_flags(O_RDWR | O_CLOEXEC); + /* Note that no task has been attached to @pid yet. */ + retval = __pidfd_prepare(pid, O_RDWR | O_CLOEXEC, &pidfile); if (retval < 0) goto bad_fork_free_pid; - pidfd = retval; - pidfile = anon_inode_getfile("[pidfd]", &pidfd_fops, pid, - O_RDWR | O_CLOEXEC); - if (IS_ERR(pidfile)) { - put_unused_fd(pidfd); - retval = PTR_ERR(pidfile); - goto bad_fork_free_pid; - } - get_pid(pid); /* held by pidfile now */ - retval = put_user(pidfd, args->pidfd); if (retval) goto bad_fork_put_pidfd; diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh index 12bcd08fe79d..5d506c6e8c0e 100755 --- a/kernel/gen_kheaders.sh +++ b/kernel/gen_kheaders.sh @@ -82,6 +82,7 @@ find $cpio_dir -type f -print0 | # Create archive and try to normalize metadata for reproducibility. tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \ + --exclude=".__afs*" --exclude=".nfs*" \ --owner=0 --group=0 --sort=name --numeric-owner --mode=u=rw,go=r,a+X \ -I $XZ -cf $tarfile -C $cpio_dir/ . 
> /dev/null diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 5fdc0b557579..35e85323940c 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -429,10 +429,6 @@ static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc) { return desc->pending_mask; } -static inline bool handle_enforce_irqctx(struct irq_data *data) -{ - return irqd_is_handle_enforce_irqctx(data); -} bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear); #else /* CONFIG_GENERIC_PENDING_IRQ */ static inline bool irq_can_move_pcntxt(struct irq_data *data) @@ -459,11 +455,12 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear) { return false; } +#endif /* !CONFIG_GENERIC_PENDING_IRQ */ + static inline bool handle_enforce_irqctx(struct irq_data *data) { - return false; + return irqd_is_handle_enforce_irqctx(data); } -#endif /* !CONFIG_GENERIC_PENDING_IRQ */ #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY) static inline int irq_domain_activate_irq(struct irq_data *data, bool reserve) diff --git a/kernel/kcov.c b/kernel/kcov.c index 9413f27294ca..cb3ecf07a34b 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c @@ -165,7 +165,7 @@ static void kcov_remote_area_put(struct kcov_remote_area *area, * Unlike in_serving_softirq(), this function returns false when called during * a hardirq or an NMI that happened in the softirq context. */ -static inline bool in_softirq_really(void) +static __always_inline bool in_softirq_really(void) { return in_serving_softirq() && !in_hardirq() && !in_nmi(); } diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c index 1d1d1b0e4248..f4623910fb1f 100644 --- a/kernel/kcsan/debugfs.c +++ b/kernel/kcsan/debugfs.c @@ -46,14 +46,8 @@ static struct { int used; /* number of elements used */ bool sorted; /* if elements are sorted */ bool whitelist; /* if list is a blacklist or whitelist */ -} report_filterlist = { - .addrs = NULL, - .size = 8, /* small initial size */ - .used = 0, - .sorted = false, - .whitelist = false, /* default is blacklist */ -}; -static DEFINE_SPINLOCK(report_filterlist_lock); +} report_filterlist; +static DEFINE_RAW_SPINLOCK(report_filterlist_lock); /* * The microbenchmark allows benchmarking KCSAN core runtime only. To run @@ -110,7 +104,7 @@ bool kcsan_skip_report_debugfs(unsigned long func_addr) return false; func_addr -= offset; /* Get function start */ - spin_lock_irqsave(&report_filterlist_lock, flags); + raw_spin_lock_irqsave(&report_filterlist_lock, flags); if (report_filterlist.used == 0) goto out; @@ -127,7 +121,7 @@ bool kcsan_skip_report_debugfs(unsigned long func_addr) ret = !ret; out: - spin_unlock_irqrestore(&report_filterlist_lock, flags); + raw_spin_unlock_irqrestore(&report_filterlist_lock, flags); return ret; } @@ -135,9 +129,9 @@ static void set_report_filterlist_whitelist(bool whitelist) { unsigned long flags; - spin_lock_irqsave(&report_filterlist_lock, flags); + raw_spin_lock_irqsave(&report_filterlist_lock, flags); report_filterlist.whitelist = whitelist; - spin_unlock_irqrestore(&report_filterlist_lock, flags); + raw_spin_unlock_irqrestore(&report_filterlist_lock, flags); } /* Returns 0 on success, error-code otherwise. 
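The KCSAN debugfs rework above and just below converts report_filterlist_lock to a raw spinlock, and since nothing may allocate while a raw spinlock is held, insert_report_filterlist() now preallocates a possibly larger array with GFP_KERNEL outside the lock, re-validates the size under the lock, and retries (discarding the unused buffer) if it raced. A runnable user-space analogue of that allocate-outside/re-check-inside shape, with a mutex standing in for the raw spinlock and hypothetical names:

    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;    /* stands in for the raw spinlock */
    static unsigned long *addrs;
    static size_t used, size;

    static int insert(unsigned long val)
    {
        unsigned long *new_addrs = NULL, *delay_free = NULL;
        size_t new_size = 0;

    retry:
        /* Racy pre-check; it is re-validated under the lock below. */
        if (used == size) {
            new_size = (size ? size : 4) * 2;
            delay_free = new_addrs = malloc(new_size * sizeof(*new_addrs));
            if (!new_addrs)
                return -1;
        }

        pthread_mutex_lock(&lock);
        if (used == size) {
            if (used >= new_size) {             /* pre-allocation too small: retry */
                pthread_mutex_unlock(&lock);
                free(new_addrs);
                delay_free = new_addrs = NULL;
                goto retry;
            }
            if (used)
                memcpy(new_addrs, addrs, used * sizeof(*addrs));
            delay_free = addrs;                 /* old array, freed after unlocking */
            addrs = new_addrs;
            size = new_size;
        }
        addrs[used++] = val;
        pthread_mutex_unlock(&lock);

        free(delay_free);                       /* free(NULL) is a no-op */
        return 0;
    }

    int main(void)
    {
        for (unsigned long i = 0; i < 100; i++)
            if (insert(i))
                return 1;
        return used == 100 ? 0 : 1;
    }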
*/ @@ -145,6 +139,9 @@ static ssize_t insert_report_filterlist(const char *func) { unsigned long flags; unsigned long addr = kallsyms_lookup_name(func); + unsigned long *delay_free = NULL; + unsigned long *new_addrs = NULL; + size_t new_size = 0; ssize_t ret = 0; if (!addr) { @@ -152,32 +149,33 @@ static ssize_t insert_report_filterlist(const char *func) return -ENOENT; } - spin_lock_irqsave(&report_filterlist_lock, flags); +retry_alloc: + /* + * Check if we need an allocation, and re-validate under the lock. Since + * the report_filterlist_lock is a raw, cannot allocate under the lock. + */ + if (data_race(report_filterlist.used == report_filterlist.size)) { + new_size = (report_filterlist.size ?: 4) * 2; + delay_free = new_addrs = kmalloc_array(new_size, sizeof(unsigned long), GFP_KERNEL); + if (!new_addrs) + return -ENOMEM; + } - if (report_filterlist.addrs == NULL) { - /* initial allocation */ - report_filterlist.addrs = - kmalloc_array(report_filterlist.size, - sizeof(unsigned long), GFP_ATOMIC); - if (report_filterlist.addrs == NULL) { - ret = -ENOMEM; - goto out; - } - } else if (report_filterlist.used == report_filterlist.size) { - /* resize filterlist */ - size_t new_size = report_filterlist.size * 2; - unsigned long *new_addrs = - krealloc(report_filterlist.addrs, - new_size * sizeof(unsigned long), GFP_ATOMIC); - - if (new_addrs == NULL) { - /* leave filterlist itself untouched */ - ret = -ENOMEM; - goto out; + raw_spin_lock_irqsave(&report_filterlist_lock, flags); + if (report_filterlist.used == report_filterlist.size) { + /* Check we pre-allocated enough, and retry if not. */ + if (report_filterlist.used >= new_size) { + raw_spin_unlock_irqrestore(&report_filterlist_lock, flags); + kfree(new_addrs); /* kfree(NULL) is safe */ + delay_free = new_addrs = NULL; + goto retry_alloc; } + if (report_filterlist.used) + memcpy(new_addrs, report_filterlist.addrs, report_filterlist.used * sizeof(unsigned long)); + delay_free = report_filterlist.addrs; /* free the old list */ + report_filterlist.addrs = new_addrs; /* switch to the new list */ report_filterlist.size = new_size; - report_filterlist.addrs = new_addrs; } /* Note: deduplicating should be done in userspace. */ @@ -185,9 +183,9 @@ static ssize_t insert_report_filterlist(const char *func) kallsyms_lookup_name(func); report_filterlist.sorted = false; -out: - spin_unlock_irqrestore(&report_filterlist_lock, flags); + raw_spin_unlock_irqrestore(&report_filterlist_lock, flags); + kfree(delay_free); return ret; } @@ -204,13 +202,13 @@ static int show_info(struct seq_file *file, void *v) } /* show filter functions, and filter type */ - spin_lock_irqsave(&report_filterlist_lock, flags); + raw_spin_lock_irqsave(&report_filterlist_lock, flags); seq_printf(file, "\n%s functions: %s\n", report_filterlist.whitelist ? "whitelisted" : "blacklisted", report_filterlist.used == 0 ? 
"none" : ""); for (i = 0; i < report_filterlist.used; ++i) seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]); - spin_unlock_irqrestore(&report_filterlist_lock, flags); + raw_spin_unlock_irqrestore(&report_filterlist_lock, flags); return 0; } diff --git a/kernel/kexec_elf.c b/kernel/kexec_elf.c index d3689632e8b9..3a5c25b2adc9 100644 --- a/kernel/kexec_elf.c +++ b/kernel/kexec_elf.c @@ -390,7 +390,7 @@ int kexec_elf_load(struct kimage *image, struct elfhdr *ehdr, struct kexec_buf *kbuf, unsigned long *lowest_load_addr) { - unsigned long lowest_addr = UINT_MAX; + unsigned long lowest_addr = ULONG_MAX; int ret; size_t i; diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 7fb9731bc5f5..5f8ce961cd9a 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -6021,6 +6021,9 @@ static void zap_class(struct pending_free *pf, struct lock_class *class) hlist_del_rcu(&class->hash_entry); WRITE_ONCE(class->key, NULL); WRITE_ONCE(class->name, NULL); + /* Class allocated but not used, -1 in nr_unused_locks */ + if (class->usage_mask == 0) + debug_atomic_dec(nr_unused_locks); nr_lock_classes--; __clear_bit(class - lock_classes, lock_classes_in_use); if (class - lock_classes == max_lock_class_idx) diff --git a/kernel/locking/semaphore.c b/kernel/locking/semaphore.c index 34bfae72f295..de9117c0e671 100644 --- a/kernel/locking/semaphore.c +++ b/kernel/locking/semaphore.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -38,7 +39,7 @@ static noinline void __down(struct semaphore *sem); static noinline int __down_interruptible(struct semaphore *sem); static noinline int __down_killable(struct semaphore *sem); static noinline int __down_timeout(struct semaphore *sem, long timeout); -static noinline void __up(struct semaphore *sem); +static noinline void __up(struct semaphore *sem, struct wake_q_head *wake_q); /** * down - acquire the semaphore @@ -183,13 +184,16 @@ EXPORT_SYMBOL(down_timeout); void __sched up(struct semaphore *sem) { unsigned long flags; + DEFINE_WAKE_Q(wake_q); raw_spin_lock_irqsave(&sem->lock, flags); if (likely(list_empty(&sem->wait_list))) sem->count++; else - __up(sem); + __up(sem, &wake_q); raw_spin_unlock_irqrestore(&sem->lock, flags); + if (!wake_q_empty(&wake_q)) + wake_up_q(&wake_q); } EXPORT_SYMBOL(up); @@ -269,11 +273,12 @@ static noinline int __sched __down_timeout(struct semaphore *sem, long timeout) return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout); } -static noinline void __sched __up(struct semaphore *sem) +static noinline void __sched __up(struct semaphore *sem, + struct wake_q_head *wake_q) { struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, struct semaphore_waiter, list); list_del(&waiter->list); waiter->up = true; - wake_up_process(waiter->task); + wake_q_add(wake_q, waiter->task); } diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig index 26ea5d04f56c..cf0da0f844f9 100644 --- a/kernel/module/Kconfig +++ b/kernel/module/Kconfig @@ -131,6 +131,7 @@ comment "Do not forget to sign required modules with scripts/sign-file" choice prompt "Which hash algorithm should modules be signed with?" depends on MODULE_SIG || IMA_APPRAISE_MODSIG + default MODULE_SIG_SHA512 help This determines which sort of hashing algorithm will be used during signature generation. 
This algorithm _must_ be built into the kernel diff --git a/kernel/padata.c b/kernel/padata.c index 3e7bf1bbc6c2..b3f79f23060c 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -47,6 +47,22 @@ struct padata_mt_job_state { static void padata_free_pd(struct parallel_data *pd); static void __init padata_mt_helper(struct work_struct *work); +static inline void padata_get_pd(struct parallel_data *pd) +{ + refcount_inc(&pd->refcnt); +} + +static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt) +{ + if (refcount_sub_and_test(cnt, &pd->refcnt)) + padata_free_pd(pd); +} + +static inline void padata_put_pd(struct parallel_data *pd) +{ + padata_put_pd_cnt(pd, 1); +} + static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index) { int cpu, target_cpu; @@ -198,7 +214,7 @@ int padata_do_parallel(struct padata_shell *ps, if ((pinst->flags & PADATA_RESET)) goto out; - refcount_inc(&pd->refcnt); + padata_get_pd(pd); padata->pd = pd; padata->cb_cpu = *cb_cpu; @@ -328,8 +344,15 @@ static void padata_reorder(struct parallel_data *pd) smp_mb(); reorder = per_cpu_ptr(pd->reorder_list, pd->cpu); - if (!list_empty(&reorder->list) && padata_find_next(pd, false)) - queue_work(pinst->serial_wq, &pd->reorder_work); + if (!list_empty(&reorder->list) && padata_find_next(pd, false)) { + /* + * Other context(eg. the padata_serial_worker) can finish the request. + * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish. + */ + padata_get_pd(pd); + if (!queue_work(pinst->serial_wq, &pd->reorder_work)) + padata_put_pd(pd); + } } static void invoke_padata_reorder(struct work_struct *work) @@ -340,6 +363,8 @@ static void invoke_padata_reorder(struct work_struct *work) pd = container_of(work, struct parallel_data, reorder_work); padata_reorder(pd); local_bh_enable(); + /* Pairs with putting the reorder_work in the serial_wq */ + padata_put_pd(pd); } static void padata_serial_worker(struct work_struct *serial_work) @@ -372,8 +397,7 @@ static void padata_serial_worker(struct work_struct *serial_work) } local_bh_enable(); - if (refcount_sub_and_test(cnt, &pd->refcnt)) - padata_free_pd(pd); + padata_put_pd_cnt(pd, cnt); } /** @@ -670,8 +694,7 @@ static int padata_replace(struct padata_instance *pinst) synchronize_rcu(); list_for_each_entry_continue_reverse(ps, &pinst->pslist, list) - if (refcount_dec_and_test(&ps->opd->refcnt)) - padata_free_pd(ps->opd); + padata_put_pd(ps->opd); pinst->flags &= ~PADATA_RESET; @@ -959,7 +982,7 @@ static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr, pinst = kobj2pinst(kobj); pentry = attr2pentry(attr); - if (pentry->show) + if (pentry->store) ret = pentry->store(pinst, attr, buf, count); return ret; @@ -1110,11 +1133,16 @@ void padata_free_shell(struct padata_shell *ps) if (!ps) return; + /* + * Wait for all _do_serial calls to finish to avoid touching + * freed pd's and ps's. 
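The padata_reorder() change above takes a parallel_data reference on behalf of the queued reorder_work, and invoke_padata_reorder() drops it once the work has run; if queue_work() reports the work was already pending, the extra reference is dropped immediately since nothing will consume it. A condensed, kernel-style restatement of that ordering (not a standalone unit, reusing only names from the hunk):

    if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
        padata_get_pd(pd);              /* reference held on behalf of reorder_work */
        if (!queue_work(pinst->serial_wq, &pd->reorder_work))
            padata_put_pd(pd);          /* work already pending: nothing will consume it */
    }

    /* ...and in invoke_padata_reorder(), once the work has actually run: */
    padata_put_pd(pd);                  /* pairs with the get taken when queueing */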
+ */ + synchronize_rcu(); + mutex_lock(&ps->pinst->lock); list_del(&ps->list); pd = rcu_dereference_protected(ps->pd, 1); - if (refcount_dec_and_test(&pd->refcnt)) - padata_free_pd(pd); + padata_put_pd(pd); mutex_unlock(&ps->pinst->lock); kfree(ps); diff --git a/kernel/params.c b/kernel/params.c index fbe43d5acc3d..ac648252513a 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -945,7 +945,9 @@ int module_sysfs_initialized; static void module_kobj_release(struct kobject *kobj) { struct module_kobject *mk = to_module_kobject(kobj); - complete(mk->kobj_completion); + + if (mk->kobj_completion) + complete(mk->kobj_completion); } struct kobj_type module_ktype = { diff --git a/kernel/pid.c b/kernel/pid.c index 74834c04a081..8bce3aebc949 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -594,20 +594,15 @@ struct task_struct *pidfd_get_task(int pidfd, unsigned int *flags) */ int pidfd_create(struct pid *pid, unsigned int flags) { - int fd; + int pidfd; + struct file *pidfd_file; - if (!pid || !pid_has_task(pid, PIDTYPE_TGID)) - return -EINVAL; + pidfd = pidfd_prepare(pid, flags, &pidfd_file); + if (pidfd < 0) + return pidfd; - if (flags & ~(O_NONBLOCK | O_RDWR | O_CLOEXEC)) - return -EINVAL; - - fd = anon_inode_getfd("[pidfd]", &pidfd_fops, get_pid(pid), - flags | O_RDWR | O_CLOEXEC); - if (fd < 0) - put_pid(pid); - - return fd; + fd_install(pidfd, pidfd_file); + return pidfd; } /** diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 30d1274f03f6..ec34c96514b3 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -599,7 +599,11 @@ int hibernation_platform_enter(void) local_irq_disable(); system_state = SYSTEM_SUSPEND; - syscore_suspend(); + + error = syscore_suspend(); + if (error) + goto Enable_irqs; + if (pm_wakeup_pending()) { error = -EAGAIN; goto Power_up; @@ -611,6 +615,7 @@ int hibernation_platform_enter(void) Power_up: syscore_resume(); + Enable_irqs: system_state = SYSTEM_RUNNING; local_irq_enable(); diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 0355ba43585a..0a286eca46a8 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -431,7 +431,7 @@ static struct latched_seq clear_seq = { /* record buffer */ #define LOG_ALIGN __alignof__(unsigned long) #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) -#define LOG_BUF_LEN_MAX (u32)(1 << 31) +#define LOG_BUF_LEN_MAX ((u32)1 << 31) static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); static char *log_buf = __log_buf; static u32 log_buf_len = __LOG_BUF_LEN; @@ -485,7 +485,7 @@ static u64 latched_seq_read_nolock(struct latched_seq *ls) seq = raw_read_seqcount_latch(&ls->latch); idx = seq & 0x1; val = ls->val[idx]; - } while (read_seqcount_latch_retry(&ls->latch, seq)); + } while (raw_read_seqcount_latch_retry(&ls->latch, seq)); return val; } diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 044026abfdd7..3929ef8148c1 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -821,8 +821,17 @@ void rcu_read_unlock_strict(void) { struct rcu_data *rdp; - if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread) + if (irqs_disabled() || in_atomic_preempt_off() || !rcu_state.gp_kthread) return; + + /* + * rcu_report_qs_rdp() can only be invoked with a stable rdp and + * from the local CPU. + * + * The in_atomic_preempt_off() check ensures that we come here holding + * the last preempt_count (which will get dropped once we return to + * __rcu_read_unlock(). 
+ */ rdp = this_cpu_ptr(&rcu_data); rdp->cpu_no_qs.b.norm = false; rcu_report_qs_rdp(rdp); @@ -963,13 +972,16 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) */ static void rcu_flavor_sched_clock_irq(int user) { - if (user || rcu_is_cpu_rrupt_from_idle()) { + if (user || rcu_is_cpu_rrupt_from_idle() || + (IS_ENABLED(CONFIG_PREEMPT_COUNT) && + (preempt_count() == HARDIRQ_OFFSET))) { /* * Get here if this CPU took its interrupt from user - * mode or from the idle loop, and if this is not a - * nested interrupt. In this case, the CPU is in - * a quiescent state, so note it. + * mode, from the idle loop without this being a nested + * interrupt, or while not holding the task preempt count + * (with PREEMPT_COUNT=y). In this case, the CPU is in a + * quiescent state, so note it. * * No memory barrier is required here because rcu_qs() * references only CPU-local variables that other CPUs diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4bb51e4ad56e..e2ff8835e185 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -704,13 +704,15 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) #endif #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING if (static_key_false((¶virt_steal_rq_enabled))) { - steal = paravirt_steal_clock(cpu_of(rq)); + u64 prev_steal; + + steal = prev_steal = paravirt_steal_clock(cpu_of(rq)); steal -= rq->prev_steal_time_rq; if (unlikely(steal > delta)) steal = delta; - rq->prev_steal_time_rq += steal; + rq->prev_steal_time_rq = prev_steal; delta -= steal; } #endif @@ -995,9 +997,10 @@ void wake_up_q(struct wake_q_head *head) struct task_struct *task; task = container_of(node, struct task_struct, wake_q); - /* Task can safely be re-inserted now: */ node = node->next; - task->wake_q.next = NULL; + /* pairs with cmpxchg_relaxed() in __wake_q_add() */ + WRITE_ONCE(task->wake_q.next, NULL); + /* Task can safely be re-inserted now. */ /* * wake_up_process() executes a full barrier, which pairs with @@ -1162,9 +1165,9 @@ static void nohz_csd_func(void *info) WARN_ON(!(flags & NOHZ_KICK_MASK)); rq->idle_balance = idle_cpu(cpu); - if (rq->idle_balance && !need_resched()) { + if (rq->idle_balance) { rq->nohz_idle_balance = flags; - raise_softirq_irqoff(SCHED_SOFTIRQ); + __raise_softirq_irqoff(SCHED_SOFTIRQ); } } @@ -2057,7 +2060,7 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) if (!(flags & ENQUEUE_RESTORE)) { sched_info_enqueue(rq, p); - psi_enqueue(p, flags & ENQUEUE_WAKEUP); + psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED)); } uclamp_rq_inc(rq, p); @@ -7386,6 +7389,14 @@ static void __setscheduler_params(struct task_struct *p, else if (fair_policy(policy)) p->static_prio = NICE_TO_PRIO(attr->sched_nice); + /* rt-policy tasks do not have a timerslack */ + if (task_is_realtime(p)) { + p->timer_slack_ns = 0; + } else if (p->timer_slack_ns == 0) { + /* when switching back to non-rt policy, restore timerslack */ + p->timer_slack_ns = p->default_timer_slack_ns; + } + /* * __sched_setscheduler() ensures attr->sched_priority == 0 when * !rt_policy. 
Always setting this ensures that things like @@ -8383,7 +8394,7 @@ SYSCALL_DEFINE0(sched_yield) #if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) int __sched __cond_resched(void) { - if (should_resched(0)) { + if (should_resched(0) && !irqs_disabled()) { preempt_schedule_common(); return 1; } diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 6e52aae3995f..48773281b019 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -99,10 +99,22 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time, unsigned int next_freq) { - if (sg_policy->need_freq_update) - sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS); - else if (sg_policy->next_freq == next_freq) + if (sg_policy->need_freq_update) { + sg_policy->need_freq_update = false; + /* + * The policy limits have changed, but if the return value of + * cpufreq_driver_resolve_freq() after applying the new limits + * is still equal to the previously selected frequency, the + * driver callback need not be invoked unless the driver + * specifically wants that to happen on every update of the + * policy limits. + */ + if (sg_policy->next_freq == next_freq && + !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS)) + return false; + } else if (sg_policy->next_freq == next_freq) { return false; + } sg_policy->next_freq = next_freq; sg_policy->last_freq_update_time = time; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 389290e950be..7f378fa0b6ed 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2762,7 +2762,7 @@ int sched_dl_global_validate(void) * value smaller than the currently allocated bandwidth in * any of the root_domains. 
*/ - for_each_possible_cpu(cpu) { + for_each_online_cpu(cpu) { rcu_read_lock_sched(); if (dl_bw_visited(cpu, gen)) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1e12f731a033..d30e0936cfec 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3550,15 +3550,17 @@ static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq) { struct cfs_rq *prev_cfs_rq; struct list_head *prev; + struct rq *rq = rq_of(cfs_rq); if (cfs_rq->on_list) { prev = cfs_rq->leaf_cfs_rq_list.prev; } else { - struct rq *rq = rq_of(cfs_rq); - prev = rq->tmp_alone_branch; } + if (prev == &rq->leaf_cfs_rq_list) + return false; + prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list); return (prev_cfs_rq->tg->parent == cfs_rq->tg); @@ -8317,6 +8319,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) int tsk_cache_hot; lockdep_assert_rq_held(env->src_rq); + if (p->sched_task_hot) + p->sched_task_hot = 0; /* * We do not migrate tasks that are: @@ -8389,10 +8393,8 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env) if (tsk_cache_hot <= 0 || env->sd->nr_balance_failed > env->sd->cache_nice_tries) { - if (tsk_cache_hot == 1) { - schedstat_inc(env->sd->lb_hot_gained[env->idle]); - schedstat_inc(p->stats.nr_forced_migrations); - } + if (tsk_cache_hot == 1) + p->sched_task_hot = 1; return 1; } @@ -8407,6 +8409,12 @@ static void detach_task(struct task_struct *p, struct lb_env *env) { lockdep_assert_rq_held(env->src_rq); + if (p->sched_task_hot) { + p->sched_task_hot = 0; + schedstat_inc(env->sd->lb_hot_gained[env->idle]); + schedstat_inc(p->stats.nr_forced_migrations); + } + deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK); set_task_cpu(p, env->dst_cpu); } @@ -8567,6 +8575,9 @@ static int detach_tasks(struct lb_env *env) continue; next: + if (p->sched_task_hot) + schedstat_inc(p->stats.nr_failed_migrations_hot); + list_move(&p->se.group_node, tasks); } @@ -11401,7 +11412,7 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags) * work being done for other CPUs. Next load * balancing owner will pick it up. */ - if (need_resched()) { + if (!idle_cpu(this_cpu) && need_resched()) { if (flags & NOHZ_STATS_KICK) has_blocked_load = true; if (flags & NOHZ_NEXT_KICK) diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index b49a96fad1d2..b02dfc322951 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -132,11 +132,9 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup) if (p->in_memstall) set |= TSK_MEMSTALL_RUNNING; - if (!wakeup || p->sched_psi_wake_requeue) { + if (!wakeup) { if (p->in_memstall) set |= TSK_MEMSTALL; - if (p->sched_psi_wake_requeue) - p->sched_psi_wake_requeue = 0; } else { if (p->in_iowait) clear |= TSK_IOWAIT; @@ -147,8 +145,6 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup) static inline void psi_dequeue(struct task_struct *p, bool sleep) { - int clear = TSK_RUNNING; - if (static_branch_likely(&psi_disabled)) return; @@ -161,10 +157,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep) if (sleep) return; - if (p->in_memstall) - clear |= (TSK_MEMSTALL | TSK_MEMSTALL_RUNNING); - - psi_task_change(p, clear, 0); + psi_task_change(p, p->psi_flags, 0); } static inline void psi_ttwu_dequeue(struct task_struct *p) @@ -176,19 +169,12 @@ static inline void psi_ttwu_dequeue(struct task_struct *p) * deregister its sleep-persistent psi states from the old * queue, and let psi_enqueue() know it has to requeue. 
*/ - if (unlikely(p->in_iowait || p->in_memstall)) { + if (unlikely(p->psi_flags)) { struct rq_flags rf; struct rq *rq; - int clear = 0; - - if (p->in_iowait) - clear |= TSK_IOWAIT; - if (p->in_memstall) - clear |= TSK_MEMSTALL; rq = __task_rq_lock(p, &rf); - psi_task_change(p, clear, 0); - p->sched_psi_wake_requeue = 1; + psi_task_change(p, p->psi_flags, 0); __task_rq_unlock(rq, &rf); } } diff --git a/kernel/softirq.c b/kernel/softirq.c index a47396161843..9ab5ca399a99 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -140,6 +140,18 @@ static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = { .lock = INIT_LOCAL_LOCK(softirq_ctrl.lock), }; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static struct lock_class_key bh_lock_key; +struct lockdep_map bh_lock_map = { + .name = "local_bh", + .key = &bh_lock_key, + .wait_type_outer = LD_WAIT_FREE, + .wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */ + .lock_type = LD_LOCK_PERCPU, +}; +EXPORT_SYMBOL_GPL(bh_lock_map); +#endif + /** * local_bh_blocked() - Check for idle whether BH processing is blocked * @@ -162,6 +174,8 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) WARN_ON_ONCE(in_hardirq()); + lock_map_acquire_read(&bh_lock_map); + /* First entry of a task into a BH disabled section? */ if (!current->softirq_disable_cnt) { if (preemptible()) { @@ -225,6 +239,8 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) WARN_ON_ONCE(in_hardirq()); lockdep_assert_irqs_enabled(); + lock_map_release(&bh_lock_map); + local_irq_save(flags); curcnt = __this_cpu_read(softirq_ctrl.cnt); @@ -275,6 +291,8 @@ static inline void ksoftirqd_run_begin(void) /* Counterpart to ksoftirqd_run_begin() */ static inline void ksoftirqd_run_end(void) { + /* pairs with the lock_map_acquire_read() in ksoftirqd_run_begin() */ + lock_map_release(&bh_lock_map); __local_bh_enable(SOFTIRQ_OFFSET, true); WARN_ON_ONCE(in_interrupt()); local_irq_enable(); @@ -294,17 +312,24 @@ static inline void invoke_softirq(void) wakeup_softirqd(); } +#define SCHED_SOFTIRQ_MASK BIT(SCHED_SOFTIRQ) + /* * flush_smp_call_function_queue() can raise a soft interrupt in a function - * call. On RT kernels this is undesired and the only known functionality - * in the block layer which does this is disabled on RT. If soft interrupts - * get raised which haven't been raised before the flush, warn so it can be + * call. On RT kernels this is undesired and the only known functionalities + * are in the block layer which is disabled on RT, and in the scheduler for + * idle load balancing. If soft interrupts get raised which haven't been + * raised before the flush, warn if it is not a SCHED_SOFTIRQ so it can be * investigated. 
*/ void do_softirq_post_smp_call_flush(unsigned int was_pending) { - if (WARN_ON_ONCE(was_pending != local_softirq_pending())) + unsigned int is_pending = local_softirq_pending(); + + if (unlikely(was_pending != is_pending)) { + WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK)); invoke_softirq(); + } } #else /* CONFIG_PREEMPT_RT */ diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c index 6f566fe27ec1..f0bd892ef157 100644 --- a/kernel/static_call_inline.c +++ b/kernel/static_call_inline.c @@ -15,7 +15,7 @@ extern struct static_call_site __start_static_call_sites[], extern struct static_call_tramp_key __start_static_call_tramp_key[], __stop_static_call_tramp_key[]; -static bool static_call_initialized; +bool static_call_initialized; /* mutex to protect key modules/sites */ static DEFINE_MUTEX(static_call_mutex); diff --git a/kernel/sys.c b/kernel/sys.c index d06eda1387b6..06a9a87a8d3e 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -2477,6 +2477,8 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, error = current->timer_slack_ns; break; case PR_SET_TIMERSLACK: + if (task_is_realtime(current)) + break; if (arg2 <= 0) current->timer_slack_ns = current->default_timer_slack_ns; diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index a0a8370e06b6..d800ec74a14d 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -348,16 +348,18 @@ void clocksource_verify_percpu(struct clocksource *cs) cpumask_clear(&cpus_ahead); cpumask_clear(&cpus_behind); cpus_read_lock(); - preempt_disable(); + migrate_disable(); clocksource_verify_choose_cpus(); if (cpumask_empty(&cpus_chosen)) { - preempt_enable(); + migrate_enable(); cpus_read_unlock(); pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name); return; } testcpu = smp_processor_id(); - pr_warn("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", cs->name, testcpu, cpumask_pr_args(&cpus_chosen)); + pr_info("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n", + cs->name, testcpu, cpumask_pr_args(&cpus_chosen)); + preempt_disable(); for_each_cpu(cpu, &cpus_chosen) { if (cpu == testcpu) continue; @@ -377,6 +379,7 @@ void clocksource_verify_percpu(struct clocksource *cs) cs_nsec_min = cs_nsec; } preempt_enable(); + migrate_enable(); cpus_read_unlock(); if (!cpumask_empty(&cpus_ahead)) pr_warn(" CPUs %*pbl ahead of CPU %d for clocksource %s.\n", diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index f62cc13b5f14..b3860ec12450 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -58,6 +58,8 @@ #define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT) #define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD) +static void retrigger_next_event(void *arg); + /* * The timer bases: * @@ -111,7 +113,8 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = .clockid = CLOCK_TAI, .get_time = &ktime_get_clocktai, }, - } + }, + .csd = CSD_INIT(retrigger_next_event, NULL) }; static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { @@ -124,6 +127,14 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = { [CLOCK_TAI] = HRTIMER_BASE_TAI, }; +static inline bool hrtimer_base_is_online(struct hrtimer_cpu_base *base) +{ + if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) + return true; + else + return likely(base->online); +} + /* * Functions and macros which are different for UP/SMP systems are kept in a * single place @@ -145,11 +156,6 @@ static struct hrtimer_cpu_base migration_cpu_base = { 
#define migration_base migration_cpu_base.clock_base[0] -static inline bool is_migration_base(struct hrtimer_clock_base *base) -{ - return base == &migration_base; -} - /* * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock * means that all timers which are tied to this base via timer->base are @@ -182,27 +188,54 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer, } /* - * We do not migrate the timer when it is expiring before the next - * event on the target cpu. When high resolution is enabled, we cannot - * reprogram the target cpu hardware and we would cause it to fire - * late. To keep it simple, we handle the high resolution enabled and - * disabled case similar. + * Check if the elected target is suitable considering its next + * event and the hotplug state of the current CPU. + * + * If the elected target is remote and its next event is after the timer + * to queue, then a remote reprogram is necessary. However there is no + * guarantee the IPI handling the operation would arrive in time to meet + * the high resolution deadline. In this case the local CPU becomes a + * preferred target, unless it is offline. + * + * High and low resolution modes are handled the same way for simplicity. * * Called with cpu_base->lock of target cpu held. */ -static int -hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base) +static bool hrtimer_suitable_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base, + struct hrtimer_cpu_base *new_cpu_base, + struct hrtimer_cpu_base *this_cpu_base) { ktime_t expires; + /* + * The local CPU clockevent can be reprogrammed. Also get_target_base() + * guarantees it is online. + */ + if (new_cpu_base == this_cpu_base) + return true; + + /* + * The offline local CPU can't be the default target if the + * next remote target event is after this timer. Keep the + * elected new base. An IPI will we issued to reprogram + * it as a last resort. 
+ */ + if (!hrtimer_base_is_online(this_cpu_base)) + return true; + expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset); - return expires < new_base->cpu_base->expires_next; + + return expires >= new_base->cpu_base->expires_next; } -static inline -struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, - int pinned) +static inline struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base, int pinned) { + if (!hrtimer_base_is_online(base)) { + int cpu = cpumask_any_and(cpu_online_mask, housekeeping_cpumask(HK_TYPE_TIMER)); + + return &per_cpu(hrtimer_bases, cpu); + } + #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) if (static_branch_likely(&timers_migration_enabled) && !pinned) return &per_cpu(hrtimer_bases, get_nohz_timer_target()); @@ -253,8 +286,8 @@ again: raw_spin_unlock(&base->cpu_base->lock); raw_spin_lock(&new_base->cpu_base->lock); - if (new_cpu_base != this_cpu_base && - hrtimer_check_target(timer, new_base)) { + if (!hrtimer_suitable_target(timer, new_base, new_cpu_base, + this_cpu_base)) { raw_spin_unlock(&new_base->cpu_base->lock); raw_spin_lock(&base->cpu_base->lock); new_cpu_base = this_cpu_base; @@ -263,8 +296,7 @@ again: } WRITE_ONCE(timer->base, new_base); } else { - if (new_cpu_base != this_cpu_base && - hrtimer_check_target(timer, new_base)) { + if (!hrtimer_suitable_target(timer, new_base, new_cpu_base, this_cpu_base)) { new_cpu_base = this_cpu_base; goto again; } @@ -274,11 +306,6 @@ again: #else /* CONFIG_SMP */ -static inline bool is_migration_base(struct hrtimer_clock_base *base) -{ - return false; -} - static inline struct hrtimer_clock_base * lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) { @@ -728,8 +755,6 @@ static inline int hrtimer_is_hres_enabled(void) return hrtimer_hres_enabled; } -static void retrigger_next_event(void *arg); - /* * Switch to high resolution mode */ @@ -1215,6 +1240,7 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, u64 delta_ns, const enum hrtimer_mode mode, struct hrtimer_clock_base *base) { + struct hrtimer_cpu_base *this_cpu_base = this_cpu_ptr(&hrtimer_bases); struct hrtimer_clock_base *new_base; bool force_local, first; @@ -1226,9 +1252,15 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, * and enforce reprogramming after it is queued no matter whether * it is the new first expiring timer again or not. */ - force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases); + force_local = base->cpu_base == this_cpu_base; force_local &= base->cpu_base->next_timer == timer; + /* + * Don't force local queuing if this enqueue happens on a unplugged + * CPU after hrtimer_cpu_dying() has been invoked. + */ + force_local &= this_cpu_base->online; + /* * Remove an active timer from the queue. In case it is not queued * on the current CPU, make sure that remove_hrtimer() updates the @@ -1258,8 +1290,27 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, } first = enqueue_hrtimer(timer, new_base, mode); - if (!force_local) - return first; + if (!force_local) { + /* + * If the current CPU base is online, then the timer is + * never queued on a remote CPU if it would be the first + * expiring timer there. + */ + if (hrtimer_base_is_online(this_cpu_base)) + return first; + + /* + * Timer was enqueued remote because the current base is + * already offline. If the timer is the first to expire, + * kick the remote CPU to reprogram the clock event. 
+ */ + if (first) { + struct hrtimer_cpu_base *new_cpu_base = new_base->cpu_base; + + smp_call_function_single_async(new_cpu_base->cpu, &new_cpu_base->csd); + } + return 0; + } /* * Timer was forced to stay on the current CPU to avoid @@ -1378,6 +1429,18 @@ static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base, } } +#ifdef CONFIG_SMP +static __always_inline bool is_migration_base(struct hrtimer_clock_base *base) +{ + return base == &migration_base; +} +#else +static __always_inline bool is_migration_base(struct hrtimer_clock_base *base) +{ + return false; +} +#endif + /* * This function is called on PREEMPT_RT kernels when the fast path * deletion of a timer failed because the timer callback function was @@ -2090,14 +2153,9 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, struct restart_block *restart; struct hrtimer_sleeper t; int ret = 0; - u64 slack; - - slack = current->timer_slack_ns; - if (dl_task(current) || rt_task(current)) - slack = 0; hrtimer_init_sleeper_on_stack(&t, clockid, mode); - hrtimer_set_expires_range_ns(&t.timer, rqtp, slack); + hrtimer_set_expires_range_ns(&t.timer, rqtp, current->timer_slack_ns); ret = do_nanosleep(&t, mode); if (ret != -ERESTART_RESTARTBLOCK) goto out; @@ -2177,6 +2235,15 @@ int hrtimers_prepare_cpu(unsigned int cpu) } cpu_base->cpu = cpu; + hrtimer_cpu_base_init_expiry_lock(cpu_base); + return 0; +} + +int hrtimers_cpu_starting(unsigned int cpu) +{ + struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); + + /* Clear out any left over state from a CPU down operation */ cpu_base->active_bases = 0; cpu_base->hres_active = 0; cpu_base->hang_detected = 0; @@ -2185,7 +2252,6 @@ int hrtimers_prepare_cpu(unsigned int cpu) cpu_base->expires_next = KTIME_MAX; cpu_base->softirq_expires_next = KTIME_MAX; cpu_base->online = 1; - hrtimer_cpu_base_init_expiry_lock(cpu_base); return 0; } @@ -2263,13 +2329,14 @@ int hrtimers_cpu_dying(unsigned int dying_cpu) void __init hrtimers_init(void) { hrtimers_prepare_cpu(smp_processor_id()); + hrtimers_cpu_starting(smp_processor_id()); open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq); } /** * schedule_hrtimeout_range_clock - sleep until timeout * @expires: timeout value (ktime_t) - * @delta: slack in expires timeout (ktime_t) for SCHED_OTHER tasks + * @delta: slack in expires timeout (ktime_t) * @mode: timer mode * @clock_id: timer clock to be used */ @@ -2296,13 +2363,6 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, return -EINTR; } - /* - * Override any slack passed by the user if under - * rt contraints. 
- */ - if (rt_task(current)) - delta = 0; - hrtimer_init_sleeper_on_stack(&t, clock_id, mode); hrtimer_set_expires_range_ns(&t.timer, *expires, delta); hrtimer_sleeper_start_expires(&t, mode); @@ -2322,7 +2382,7 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock); /** * schedule_hrtimeout_range - sleep until timeout * @expires: timeout value (ktime_t) - * @delta: slack in expires timeout (ktime_t) for SCHED_OTHER tasks + * @delta: slack in expires timeout (ktime_t) * @mode: timer mode * * Make the current task sleep until the given expiry time has diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 8d2dd214ec68..b1ba80ce8496 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -796,7 +796,7 @@ int __do_adjtimex(struct __kernel_timex *txc, const struct timespec64 *ts, txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ, NTP_SCALE_SHIFT); if (!(time_status & STA_NANO)) - txc->offset = (u32)txc->offset / NSEC_PER_USEC; + txc->offset = div_s64(txc->offset, NSEC_PER_USEC); } result = time_state; /* mostly `TIME_OK' */ diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index 2d6cf93ca370..fc08d4ccdeeb 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -161,6 +161,7 @@ static int posix_timer_add(struct k_itimer *timer) return id; } spin_unlock(&hash_lock); + cond_resched(); } /* POSIX return code when no timer ID could be allocated */ return -EAGAIN; diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index 8464c5acc913..e8f2fb09a214 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -77,7 +77,7 @@ notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq) notrace int sched_clock_read_retry(unsigned int seq) { - return read_seqcount_latch_retry(&cd.seq, seq); + return raw_read_seqcount_latch_retry(&cd.seq, seq); } unsigned long long notrace sched_clock(void) diff --git a/kernel/time/time.c b/kernel/time/time.c index 526257b3727c..a92c7f3277ad 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -539,7 +539,7 @@ EXPORT_SYMBOL(ns_to_timespec64); * - all other values are converted to jiffies by either multiplying * the input value by a factor or dividing it with a factor and * handling any 32-bit overflows. 
- * for the details see __msecs_to_jiffies() + * for the details see _msecs_to_jiffies() * * msecs_to_jiffies() checks for the passed in value being a constant * via __builtin_constant_p() allowing gcc to eliminate most of the diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 8ac43afc11f9..dd294e96193b 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -450,7 +450,7 @@ static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf) tkr = tkf->base + (seq & 0x01); now = ktime_to_ns(tkr->base); now += fast_tk_get_delta_ns(tkr); - } while (read_seqcount_latch_retry(&tkf->seq, seq)); + } while (raw_read_seqcount_latch_retry(&tkf->seq, seq)); return now; } @@ -566,7 +566,7 @@ static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono) basem = ktime_to_ns(tkr->base); baser = ktime_to_ns(tkr->base_real); delta = fast_tk_get_delta_ns(tkr); - } while (read_seqcount_latch_retry(&tkf->seq, seq)); + } while (raw_read_seqcount_latch_retry(&tkf->seq, seq)); if (mono) *mono = basem + delta; diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index ed7d6ad694fb..20a5e6962b69 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c @@ -46,7 +46,7 @@ static void print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer, int idx, u64 now) { - SEQ_printf(m, " #%d: <%pK>, %ps", idx, taddr, timer->function); + SEQ_printf(m, " #%d: <%p>, %ps", idx, taddr, timer->function); SEQ_printf(m, ", S:%02x", timer->state); SEQ_printf(m, "\n"); SEQ_printf(m, " # expires at %Lu-%Lu nsecs [in %Ld to %Ld nsecs]\n", @@ -98,7 +98,7 @@ next_one: static void print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) { - SEQ_printf(m, " .base: %pK\n", base); + SEQ_printf(m, " .base: %p\n", base); SEQ_printf(m, " .index: %d\n", base->index); SEQ_printf(m, " .resolution: %u nsecs\n", hrtimer_resolution); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 583961a9e539..7254c808b27c 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -403,7 +403,7 @@ static const struct bpf_func_proto bpf_trace_printk_proto = { .arg2_type = ARG_CONST_SIZE, }; -static void __set_printk_clr_event(void) +static void __set_printk_clr_event(struct work_struct *work) { /* * This program might be calling bpf_trace_printk, @@ -416,10 +416,11 @@ static void __set_printk_clr_event(void) if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1)) pr_warn_ratelimited("could not enable bpf_trace_printk events"); } +static DECLARE_WORK(set_printk_work, __set_printk_clr_event); const struct bpf_func_proto *bpf_get_trace_printk_proto(void) { - __set_printk_clr_event(); + schedule_work(&set_printk_work); return &bpf_trace_printk_proto; } @@ -462,7 +463,7 @@ static const struct bpf_func_proto bpf_trace_vprintk_proto = { const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void) { - __set_printk_clr_event(); + schedule_work(&set_printk_work); return &bpf_trace_vprintk_proto; } @@ -857,7 +858,7 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type) if (unlikely(is_global_init(current))) return -EPERM; - if (irqs_disabled()) { + if (preempt_count() != 0 || irqs_disabled()) { /* Do an early check on signal validity. Otherwise, * the error is lost in deferred irq_work. 
*/ @@ -1192,7 +1193,8 @@ static const struct bpf_func_proto bpf_get_func_arg_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_LONG, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, + .arg3_size = sizeof(u64), }; BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value) @@ -1208,7 +1210,8 @@ static const struct bpf_func_proto bpf_get_func_ret_proto = { .func = get_func_ret, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_LONG, + .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, + .arg2_size = sizeof(u64), }; BPF_CALL_1(get_func_arg_cnt, void *, ctx) @@ -2178,6 +2181,9 @@ void perf_event_detach_bpf_prog(struct perf_event *event) goto unlock; old_array = bpf_event_rcu_dereference(event->tp_event->prog_array); + if (!old_array) + goto put; + ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array); if (ret < 0) { bpf_prog_array_delete_safe(old_array, event->prog); @@ -2186,6 +2192,14 @@ void perf_event_detach_bpf_prog(struct perf_event *event) bpf_prog_array_free_sleepable(old_array); } +put: + /* + * It could be that the bpf_prog is not sleepable (and will be freed + * via normal RCU), but is called from a point that supports sleepable + * programs and uses tasks-trace-RCU. + */ + synchronize_rcu_tasks_trace(); + bpf_prog_put(event->prog); event->prog = NULL; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index e9ce45dce31b..bd19c83c0e5e 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -507,6 +507,7 @@ static int function_stat_show(struct seq_file *m, void *v) static struct trace_seq s; unsigned long long avg; unsigned long long stddev; + unsigned long long stddev_denom; #endif mutex_lock(&ftrace_profile_lock); @@ -528,23 +529,19 @@ static int function_stat_show(struct seq_file *m, void *v) #ifdef CONFIG_FUNCTION_GRAPH_TRACER seq_puts(m, " "); - /* Sample standard deviation (s^2) */ - if (rec->counter <= 1) - stddev = 0; - else { - /* - * Apply Welford's method: - * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) - */ + /* + * Variance formula: + * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2) + * Maybe Welford's method is better here? + * Divide only by 1000 for ns^2 -> us^2 conversion. + * trace_print_graph_duration will divide by 1000 again. + */ + stddev = 0; + stddev_denom = rec->counter * (rec->counter - 1) * 1000; + if (stddev_denom) { stddev = rec->counter * rec->time_squared - rec->time * rec->time; - - /* - * Divide only 1000 for ns^2 -> us^2 conversion. - * trace_print_graph_duration will divide 1000 again. 
- */ - stddev = div64_ul(stddev, - rec->counter * (rec->counter - 1) * 1000); + stddev = div64_ul(stddev, stddev_denom); } trace_seq_init(&s); @@ -4437,6 +4434,9 @@ ftrace_mod_callback(struct trace_array *tr, struct ftrace_hash *hash, char *func; int ret; + if (!tr) + return -ENODEV; + /* match_records() modifies func, and we need the original */ func = kstrdup(func_orig, GFP_KERNEL); if (!func) @@ -5105,6 +5105,9 @@ __ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) return -ENOENT; free_hash_entry(hash, entry); return 0; + } else if (__ftrace_lookup_ip(hash, ip) != NULL) { + /* Already exists */ + return 0; } return add_hash_entry(hash, ip); @@ -6519,6 +6522,7 @@ ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer) } } } + cond_resched(); } while_for_each_ftrace_rec(); out: mutex_unlock(&ftrace_lock); diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 0093fc56ab3a..21b7d044797e 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -5994,9 +5994,9 @@ static __init int rb_write_something(struct rb_test_data *data, bool nested) /* Ignore dropped events before test starts. */ if (started) { if (nested) - data->bytes_dropped += len; - else data->bytes_dropped_nested += len; + else + data->bytes_dropped += len; } return len; } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 96749a6cf111..9e0b9c9a7dff 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3422,10 +3422,9 @@ out_nobuffer: } EXPORT_SYMBOL_GPL(trace_vbprintk); -__printf(3, 0) -static int -__trace_array_vprintk(struct trace_buffer *buffer, - unsigned long ip, const char *fmt, va_list args) +static __printf(3, 0) +int __trace_array_vprintk(struct trace_buffer *buffer, + unsigned long ip, const char *fmt, va_list args) { struct trace_event_call *call = &event_print; struct ring_buffer_event *event; @@ -3478,7 +3477,6 @@ out_nobuffer: return len; } -__printf(3, 0) int trace_array_vprintk(struct trace_array *tr, unsigned long ip, const char *fmt, va_list args) { @@ -3505,7 +3503,6 @@ int trace_array_vprintk(struct trace_array *tr, * Note, trace_array_init_printk() must be called on @tr before this * can be used. */ -__printf(3, 0) int trace_array_printk(struct trace_array *tr, unsigned long ip, const char *fmt, ...) { @@ -3550,7 +3547,6 @@ int trace_array_init_printk(struct trace_array *tr) } EXPORT_SYMBOL_GPL(trace_array_init_printk); -__printf(3, 4) int trace_array_printk_buf(struct trace_buffer *buffer, unsigned long ip, const char *fmt, ...) { @@ -3566,7 +3562,6 @@ int trace_array_printk_buf(struct trace_buffer *buffer, return ret; } -__printf(2, 0) int trace_vprintk(unsigned long ip, const char *fmt, va_list args) { return trace_array_vprintk(&global_trace, ip, fmt, args); @@ -5262,6 +5257,9 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, cpumask_var_t tracing_cpumask_new; int err; + if (count == 0 || count > KMALLOC_MAX_SIZE) + return -EINVAL; + if (!zalloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL)) return -ENOMEM; @@ -7051,13 +7049,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, /* Copy the data into the page, so we can start over. 
*/ ret = trace_seq_to_buffer(&iter->seq, page_address(spd.pages[i]), - trace_seq_used(&iter->seq)); + min((size_t)trace_seq_used(&iter->seq), + PAGE_SIZE)); if (ret < 0) { __free_page(spd.pages[i]); break; } spd.partial[i].offset = 0; - spd.partial[i].len = trace_seq_used(&iter->seq); + spd.partial[i].len = ret; trace_seq_init(&iter->seq); } diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index aad7fcd84617..49b297ca7fc7 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -780,13 +780,15 @@ static inline void __init disable_tracing_selftest(const char *reason) extern void *head_page(struct trace_array_cpu *data); extern unsigned long long ns2usecs(u64 nsec); -extern int -trace_vbprintk(unsigned long ip, const char *fmt, va_list args); -extern int -trace_vprintk(unsigned long ip, const char *fmt, va_list args); -extern int -trace_array_vprintk(struct trace_array *tr, - unsigned long ip, const char *fmt, va_list args); + +__printf(2, 0) +int trace_vbprintk(unsigned long ip, const char *fmt, va_list args); +__printf(2, 0) +int trace_vprintk(unsigned long ip, const char *fmt, va_list args); +__printf(3, 0) +int trace_array_vprintk(struct trace_array *tr, + unsigned long ip, const char *fmt, va_list args); +__printf(3, 4) int trace_array_printk_buf(struct trace_buffer *buffer, unsigned long ip, const char *fmt, ...); void trace_printk_seq(struct trace_seq *s); diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 4702efb00ff2..4cb2ebc439be 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -154,5 +154,5 @@ static atomic64_t trace_counter; */ u64 notrace trace_clock_counter(void) { - return atomic64_add_return(1, &trace_counter); + return atomic64_inc_return(&trace_counter); } diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c index 4376887e0d8a..c9b0533407ed 100644 --- a/kernel/trace/trace_dynevent.c +++ b/kernel/trace/trace_dynevent.c @@ -16,7 +16,7 @@ #include "trace_output.h" /* for trace_event_sem */ #include "trace_dynevent.h" -static DEFINE_MUTEX(dyn_event_ops_mutex); +DEFINE_MUTEX(dyn_event_ops_mutex); static LIST_HEAD(dyn_event_ops_list); bool trace_event_dyn_try_get_ref(struct trace_event_call *dyn_call) @@ -125,6 +125,20 @@ out: return ret; } +/* + * Locked version of event creation. The event creation must be protected by + * dyn_event_ops_mutex because of protecting trace_probe_log. 
+ */ +int dyn_event_create(const char *raw_command, struct dyn_event_operations *type) +{ + int ret; + + mutex_lock(&dyn_event_ops_mutex); + ret = type->create(raw_command); + mutex_unlock(&dyn_event_ops_mutex); + return ret; +} + static int create_dyn_event(const char *raw_command) { struct dyn_event_operations *ops; diff --git a/kernel/trace/trace_dynevent.h b/kernel/trace/trace_dynevent.h index 936477a111d3..beee3f8d7544 100644 --- a/kernel/trace/trace_dynevent.h +++ b/kernel/trace/trace_dynevent.h @@ -100,6 +100,7 @@ void *dyn_event_seq_next(struct seq_file *m, void *v, loff_t *pos); void dyn_event_seq_stop(struct seq_file *m, void *v); int dyn_events_release_all(struct dyn_event_operations *type); int dyn_event_release(const char *raw_command, struct dyn_event_operations *type); +int dyn_event_create(const char *raw_command, struct dyn_event_operations *type); /* * for_each_dyn_event - iterate over the dyn_event list diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c index d2370cdb4c1d..2a75cf3aa7bf 100644 --- a/kernel/trace/trace_eprobe.c +++ b/kernel/trace/trace_eprobe.c @@ -1069,6 +1069,11 @@ static int __trace_eprobe_create(int argc, const char *argv[]) goto error; } ret = dyn_event_add(&ep->devent, &ep->tp.event->call); + if (ret < 0) { + trace_probe_unregister_event_call(&ep->tp); + mutex_unlock(&event_mutex); + goto error; + } mutex_unlock(&event_mutex); return ret; parse_error: diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 61e3a2620fa3..f2000cf2b3bb 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -356,10 +356,16 @@ void perf_uprobe_destroy(struct perf_event *p_event) int perf_trace_add(struct perf_event *p_event, int flags) { struct trace_event_call *tp_event = p_event->tp_event; + struct hw_perf_event *hwc = &p_event->hw; if (!(flags & PERF_EF_START)) p_event->hw.state = PERF_HES_STOPPED; + if (is_sampling_event(p_event)) { + hwc->last_period = hwc->sample_period; + perf_swevent_set_period(p_event); + } + /* * If TRACE_REG_PERF_ADD returns false; no custom action was performed * and we need to take the default action of enqueueing our event on diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 24859d964505..8723ad2f9c63 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -242,19 +242,16 @@ int trace_event_get_offsets(struct trace_event_call *call) return tail->offset + tail->size; } -/* - * Check if the referenced field is an array and return true, - * as arrays are OK to dereference. - */ -static bool test_field(const char *fmt, struct trace_event_call *call) + +static struct trace_event_fields *find_event_field(const char *fmt, + struct trace_event_call *call) { struct trace_event_fields *field = call->class->fields_array; - const char *array_descriptor; const char *p = fmt; int len; if (!(len = str_has_prefix(fmt, "REC->"))) - return false; + return NULL; fmt += len; for (p = fmt; *p; p++) { if (!isalnum(*p) && *p != '_') @@ -263,16 +260,132 @@ static bool test_field(const char *fmt, struct trace_event_call *call) len = p - fmt; for (; field->type; field++) { - if (strncmp(field->name, fmt, len) || - field->name[len]) + if (strncmp(field->name, fmt, len) || field->name[len]) continue; - array_descriptor = strchr(field->type, '['); - /* This is an array and is OK to dereference. 
*/ - return array_descriptor != NULL; + + return field; + } + return NULL; +} + +/* + * Check if the referenced field is an array and return true, + * as arrays are OK to dereference. + */ +static bool test_field(const char *fmt, struct trace_event_call *call) +{ + struct trace_event_fields *field; + + field = find_event_field(fmt, call); + if (!field) + return false; + + /* This is an array and is OK to dereference. */ + return strchr(field->type, '[') != NULL; +} + +/* Look for a string within an argument */ +static bool find_print_string(const char *arg, const char *str, const char *end) +{ + const char *r; + + r = strstr(arg, str); + return r && r < end; +} + +/* Return true if the argument pointer is safe */ +static bool process_pointer(const char *fmt, int len, struct trace_event_call *call) +{ + const char *r, *e, *a; + + e = fmt + len; + + /* Find the REC-> in the argument */ + r = strstr(fmt, "REC->"); + if (r && r < e) { + /* + * Addresses of events on the buffer, or an array on the buffer is + * OK to dereference. There's ways to fool this, but + * this is to catch common mistakes, not malicious code. + */ + a = strchr(fmt, '&'); + if ((a && (a < r)) || test_field(r, call)) + return true; + } else if (find_print_string(fmt, "__get_dynamic_array(", e)) { + return true; + } else if (find_print_string(fmt, "__get_rel_dynamic_array(", e)) { + return true; + } else if (find_print_string(fmt, "__get_dynamic_array_len(", e)) { + return true; + } else if (find_print_string(fmt, "__get_rel_dynamic_array_len(", e)) { + return true; + } else if (find_print_string(fmt, "__get_sockaddr(", e)) { + return true; + } else if (find_print_string(fmt, "__get_rel_sockaddr(", e)) { + return true; } return false; } +/* Return true if the string is safe */ +static bool process_string(const char *fmt, int len, struct trace_event_call *call) +{ + const char *r, *e, *s; + + e = fmt + len; + + /* + * There are several helper functions that return strings. + * If the argument contains a function, then assume its field is valid. + * It is considered that the argument has a function if it has: + * alphanumeric or '_' before a parenthesis. + */ + s = fmt; + do { + r = strstr(s, "("); + if (!r || r >= e) + break; + for (int i = 1; r - i >= s; i++) { + char ch = *(r - i); + if (isspace(ch)) + continue; + if (isalnum(ch) || ch == '_') + return true; + /* Anything else, this isn't a function */ + break; + } + /* A function could be wrapped in parethesis, try the next one */ + s = r + 1; + } while (s < e); + + /* + * Check for arrays. If the argument has: foo[REC->val] + * then it is very likely that foo is an array of strings + * that are safe to use. + */ + r = strstr(s, "["); + if (r && r < e) { + r = strstr(r, "REC->"); + if (r && r < e) + return true; + } + + /* + * If there's any strings in the argument consider this arg OK as it + * could be: REC->field ? "foo" : "bar" and we don't want to get into + * verifying that logic here. 
+ */ + if (find_print_string(fmt, "\"", e)) + return true; + + /* Dereferenced strings are also valid like any other pointer */ + if (process_pointer(fmt, len, call)) + return true; + + /* Make sure the field is found, and consider it OK for now if it is */ + return find_event_field(fmt, call) != NULL; +} + /* * Examine the print fmt of the event looking for unsafe dereference * pointers using %p* that could be recorded in the trace event and @@ -282,13 +395,14 @@ static bool test_field(const char *fmt, struct trace_event_call *call) static void test_event_printk(struct trace_event_call *call) { u64 dereference_flags = 0; + u64 string_flags = 0; bool first = true; - const char *fmt, *c, *r, *a; + const char *fmt; int parens = 0; char in_quote = 0; int start_arg = 0; int arg = 0; - int i; + int i, e; fmt = call->print_fmt; @@ -342,6 +456,7 @@ static void test_event_printk(struct trace_event_call *call) case '%': continue; case 'p': + do_pointer: /* Find dereferencing fields */ switch (fmt[i + 1]) { case 'B': case 'R': case 'r': @@ -370,10 +485,24 @@ static void test_event_printk(struct trace_event_call *call) continue; if (fmt[i + j] == '*') { star = true; + /* Handle %*pbl case */ + if (!j && fmt[i + 1] == 'p') { + arg++; + i++; + goto do_pointer; + } continue; } - if ((fmt[i + j] == 's') && star) - arg++; + if ((fmt[i + j] == 's')) { + if (star) + arg++; + if (WARN_ONCE(arg == 63, + "Too many args for event: %s", + trace_event_name(call))) + return; + dereference_flags |= 1ULL << arg; + string_flags |= 1ULL << arg; + } break; } break; @@ -401,42 +530,47 @@ static void test_event_printk(struct trace_event_call *call) case ',': if (in_quote || parens) continue; + e = i; i++; while (isspace(fmt[i])) i++; - start_arg = i; - if (!(dereference_flags & (1ULL << arg))) - goto next_arg; - /* Find the REC-> in the argument */ - c = strchr(fmt + i, ','); - r = strstr(fmt + i, "REC->"); - if (r && (!c || r < c)) { - /* - * Addresses of events on the buffer, - * or an array on the buffer is - * OK to dereference. - * There's ways to fool this, but - * this is to catch common mistakes, - * not malicious code. - */ - a = strchr(fmt + i, '&'); - if ((a && (a < r)) || test_field(r, call)) - dereference_flags &= ~(1ULL << arg); - } else if ((r = strstr(fmt + i, "__get_dynamic_array(")) && - (!c || r < c)) { - dereference_flags &= ~(1ULL << arg); - } else if ((r = strstr(fmt + i, "__get_sockaddr(")) && - (!c || r < c)) { - dereference_flags &= ~(1ULL << arg); + /* + * If start_arg is zero, then this is the start of the + * first argument. The processing of the argument happens + * when the end of the argument is found, as it needs to + * handle paranthesis and such. 
+ */ + if (!start_arg) { + start_arg = i; + /* Balance out the i++ in the for loop */ + i--; + continue; } - next_arg: - i--; + if (dereference_flags & (1ULL << arg)) { + if (string_flags & (1ULL << arg)) { + if (process_string(fmt + start_arg, e - start_arg, call)) + dereference_flags &= ~(1ULL << arg); + } else if (process_pointer(fmt + start_arg, e - start_arg, call)) + dereference_flags &= ~(1ULL << arg); + } + + start_arg = i; arg++; + /* Balance out the i++ in the for loop */ + i--; } } + if (dereference_flags & (1ULL << arg)) { + if (string_flags & (1ULL << arg)) { + if (process_string(fmt + start_arg, i - start_arg, call)) + dereference_flags &= ~(1ULL << arg); + } else if (process_pointer(fmt + start_arg, i - start_arg, call)) + dereference_flags &= ~(1ULL << arg); + } + /* * If you triggered the below warning, the trace event reported * uses an unsafe dereference pointer %p*. As the data stored @@ -649,7 +783,9 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file, clear_bit(EVENT_FILE_FL_RECORDED_TGID_BIT, &file->flags); } - call->class->reg(call, TRACE_REG_UNREGISTER, file); + ret = call->class->reg(call, TRACE_REG_UNREGISTER, file); + + WARN_ON_ONCE(ret); } /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */ if (file->flags & EVENT_FILE_FL_SOFT_MODE) diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 86a0531efd43..23fbd1b10712 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -716,7 +716,7 @@ static __always_inline char *test_string(char *str) kstr = ubuf->buffer; /* For safety, do not trust the string pointer */ - if (!strncpy_from_kernel_nofault(kstr, str, USTRING_BUF_SIZE)) + if (strncpy_from_kernel_nofault(kstr, str, USTRING_BUF_SIZE) < 0) return NULL; return kstr; } @@ -735,7 +735,7 @@ static __always_inline char *test_ustring(char *str) /* user space address? 
*/ ustr = (char __user *)str; - if (!strncpy_from_user_nofault(kstr, ustr, USTRING_BUF_SIZE)) + if (strncpy_from_user_nofault(kstr, ustr, USTRING_BUF_SIZE) < 0) return NULL; return kstr; diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 3b0da1bddf63..a11392596a36 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -6551,27 +6551,27 @@ static int event_hist_trigger_parse(struct event_command *cmd_ops, if (existing_hist_update_only(glob, trigger_data, file)) goto out_free; + if (!get_named_trigger_data(trigger_data)) { + + ret = create_actions(hist_data); + if (ret) + goto out_free; + + if (has_hist_vars(hist_data) || hist_data->n_var_refs) { + ret = save_hist_vars(hist_data); + if (ret) + goto out_free; + } + + ret = tracing_map_init(hist_data->map); + if (ret) + goto out_free; + } + ret = event_trigger_register(cmd_ops, file, glob, trigger_data); if (ret < 0) goto out_free; - if (get_named_trigger_data(trigger_data)) - goto enable; - - ret = create_actions(hist_data); - if (ret) - goto out_unreg; - - if (has_hist_vars(hist_data) || hist_data->n_var_refs) { - ret = save_hist_vars(hist_data); - if (ret) - goto out_unreg; - } - - ret = tracing_map_init(hist_data->map); - if (ret) - goto out_unreg; -enable: ret = hist_trigger_enable(trigger_data, file); if (ret) goto out_unreg; diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 94a2e9d21f58..385e9fbbfbe7 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -312,7 +312,7 @@ static const char *synth_field_fmt(char *type) else if (strcmp(type, "gfp_t") == 0) fmt = "%x"; else if (synth_field_is_string(type)) - fmt = "%.*s"; + fmt = "%s"; else if (synth_field_is_stack(type)) fmt = "%s"; @@ -875,6 +875,38 @@ static struct trace_event_fields synth_event_fields_array[] = { {} }; +static int synth_event_reg(struct trace_event_call *call, + enum trace_reg type, void *data) +{ + struct synth_event *event = container_of(call, struct synth_event, call); + + switch (type) { +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_REGISTER: +#endif + case TRACE_REG_REGISTER: + if (!try_module_get(event->mod)) + return -EBUSY; + break; + default: + break; + } + + int ret = trace_event_reg(call, type, data); + + switch (type) { +#ifdef CONFIG_PERF_EVENTS + case TRACE_REG_PERF_UNREGISTER: +#endif + case TRACE_REG_UNREGISTER: + module_put(event->mod); + break; + default: + break; + } + return ret; +} + static int register_synth_event(struct synth_event *event) { struct trace_event_call *call = &event->call; @@ -904,7 +936,7 @@ static int register_synth_event(struct synth_event *event) goto out; } call->flags = TRACE_EVENT_FL_TRACEPOINT; - call->class->reg = trace_event_reg; + call->class->reg = synth_event_reg; call->class->probe = trace_event_raw_event_synth; call->data = event; call->tp = event->tp; diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index f941ce01ee35..afdbad16d00a 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c @@ -1539,7 +1539,7 @@ stacktrace_trigger(struct event_trigger_data *data, struct trace_event_file *file = data->private_data; if (file) - __trace_stack(file->tr, tracing_gen_ctx(), STACK_SKIP); + __trace_stack(file->tr, tracing_gen_ctx_dec(), STACK_SKIP); else trace_dump_stack(STACK_SKIP); } diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 9f1bfbe105e8..44ae51d1dc45 100644 --- 
a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c @@ -185,7 +185,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, if (bit < 0) return; - trace_ctx = tracing_gen_ctx(); + trace_ctx = tracing_gen_ctx_dec(); cpu = smp_processor_id(); data = per_cpu_ptr(tr->array_buffer.data, cpu); @@ -285,7 +285,6 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, struct trace_array *tr = op->private; struct trace_array_cpu *data; unsigned int trace_ctx; - unsigned long flags; int bit; int cpu; @@ -312,8 +311,7 @@ function_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip, if (is_repeat_check(tr, last_info, ip, parent_ip)) goto out; - local_save_flags(flags); - trace_ctx = tracing_gen_ctx_flags(flags); + trace_ctx = tracing_gen_ctx_dec(); process_repeats(tr, ip, parent_ip, last_info, trace_ctx); trace_function(tr, ip, parent_ip, trace_ctx); @@ -563,11 +561,7 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip, static __always_inline void trace_stack(struct trace_array *tr) { - unsigned int trace_ctx; - - trace_ctx = tracing_gen_ctx(); - - __trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP); + __trace_stack(tr, tracing_gen_ctx_dec(), FTRACE_STACK_SKIP); } static void diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 203204cadf92..a8c1f5634068 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -1244,6 +1244,7 @@ void graph_trace_close(struct trace_iterator *iter) if (data) { free_percpu(data->cpu_data); kfree(data); + iter->private = NULL; } } diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index ba37f768e2f2..6c9db857fe0e 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -231,8 +231,6 @@ static void irqsoff_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - else - iter->private = NULL; } static void irqsoff_trace_close(struct trace_iterator *iter) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index e3993d19687d..cc155c411768 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -702,7 +702,7 @@ static int trace_kprobe_module_callback(struct notifier_block *nb, static struct notifier_block trace_kprobe_module_nb = { .notifier_call = trace_kprobe_module_callback, - .priority = 1 /* Invoked after kprobe module callback */ + .priority = 2 /* Invoked after kprobe and jump_label module callback */ }; static int count_symbols(void *data, unsigned long unused) @@ -975,7 +975,7 @@ static int create_or_delete_trace_kprobe(const char *raw_command) if (raw_command[0] == '-') return dyn_event_release(raw_command, &trace_kprobe_ops); - ret = trace_kprobe_create(raw_command); + ret = dyn_event_create(raw_command, &trace_kprobe_ops); return ret == -ECANCELED ? 
-EINVAL : ret; } @@ -1816,7 +1816,7 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs, int ret; char *event; - if (func) { + if (func && !strchr(func, ':')) { unsigned int count; count = number_of_same_symbols(func); diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c index 92ef18197ab1..e5929cb3840f 100644 --- a/kernel/trace/trace_osnoise.c +++ b/kernel/trace/trace_osnoise.c @@ -1756,7 +1756,6 @@ static int start_kthread(unsigned int cpu) if (IS_ERR(kthread)) { pr_err(BANNER "could not start sampling thread\n"); - stop_per_cpu_kthreads(); return -ENOMEM; } diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index ba48b5e270e1..3888a59c9dfe 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -143,9 +143,12 @@ fail: } static struct trace_probe_log trace_probe_log; +extern struct mutex dyn_event_ops_mutex; void trace_probe_log_init(const char *subsystem, int argc, const char **argv) { + lockdep_assert_held(&dyn_event_ops_mutex); + trace_probe_log.subsystem = subsystem; trace_probe_log.argc = argc; trace_probe_log.argv = argv; @@ -154,11 +157,15 @@ void trace_probe_log_init(const char *subsystem, int argc, const char **argv) void trace_probe_log_clear(void) { + lockdep_assert_held(&dyn_event_ops_mutex); + memset(&trace_probe_log, 0, sizeof(trace_probe_log)); } void trace_probe_log_set_index(int index) { + lockdep_assert_held(&dyn_event_ops_mutex); + trace_probe_log.index = index; } @@ -167,6 +174,8 @@ void __trace_probe_log_err(int offset, int err_type) char *command, *p; int i, len = 0, pos = 0; + lockdep_assert_held(&dyn_event_ops_mutex); + if (!trace_probe_log.argv) return; diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 0469a04a355f..330aee1c1a49 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -168,8 +168,6 @@ static void wakeup_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - else - iter->private = NULL; } static void wakeup_trace_close(struct trace_iterator *iter) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 942ddbdace4a..e39c5ca76eab 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -299,6 +299,12 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) int syscall_nr; int size; + /* + * Syscall probe called with preemption enabled, but the ring + * buffer and per-cpu data require preemption to be disabled. + */ + guard(preempt_notrace)(); + syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; @@ -338,6 +344,12 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) struct trace_event_buffer fbuffer; int syscall_nr; + /* + * Syscall probe called with preemption enabled, but the ring + * buffer and per-cpu data require preemption to be disabled. 
+ */ + guard(preempt_notrace)(); + syscall_nr = trace_get_syscall_nr(current, regs); if (syscall_nr < 0 || syscall_nr >= NR_syscalls) return; diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index a6a3ff2a441e..53ef3cb65098 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -732,7 +732,7 @@ static int create_or_delete_trace_uprobe(const char *raw_command) if (raw_command[0] == '-') return dyn_event_release(raw_command, &trace_uprobe_ops); - ret = trace_uprobe_create(raw_command); + ret = dyn_event_create(raw_command, &trace_uprobe_ops); return ret == -ECANCELED ? -EINVAL : ret; } diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c index 3a56e7c8aa4f..1921ade45be3 100644 --- a/kernel/trace/tracing_map.c +++ b/kernel/trace/tracing_map.c @@ -845,15 +845,11 @@ int tracing_map_init(struct tracing_map *map) static int cmp_entries_dup(const void *A, const void *B) { const struct tracing_map_sort_entry *a, *b; - int ret = 0; a = *(const struct tracing_map_sort_entry **)A; b = *(const struct tracing_map_sort_entry **)B; - if (memcmp(a->key, b->key, a->elt->map->key_size)) - ret = 1; - - return ret; + return memcmp(a->key, b->key, a->elt->map->key_size); } static int cmp_entries_sum(const void *A, const void *B) diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index 442bb92212f2..a72ea1c2f59f 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -270,6 +270,15 @@ long watch_queue_set_size(struct pipe_inode_info *pipe, unsigned int nr_notes) if (ret < 0) goto error; + /* + * pipe_resize_ring() does not update nr_accounted for watch_queue + * pipes, because the above vastly overprovisions. Set nr_accounted on + * and max_usage this pipe to the number that was actually charged to + * the user above via account_pipe_buffers. + */ + pipe->max_usage = nr_pages; + pipe->nr_accounted = nr_pages; + ret = -ENOMEM; pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL); if (!pages) diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c index c02baa4168e1..055356508d97 100644 --- a/lib/842/842_compress.c +++ b/lib/842/842_compress.c @@ -532,6 +532,8 @@ int sw842_compress(const u8 *in, unsigned int ilen, } if (repeat_count) { ret = add_repeat_template(p, repeat_count); + if (ret) + return ret; repeat_count = 0; if (next == last) /* reached max repeat bits */ goto repeat; diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index de27a91b9d66..ee7a044a607d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1424,7 +1424,7 @@ config LOCKDEP_SMALL config LOCKDEP_BITS int "Bitsize for MAX_LOCKDEP_ENTRIES" depends on LOCKDEP && !LOCKDEP_SMALL - range 10 30 + range 10 24 default 15 help Try increasing this value if you hit "BUG: MAX_LOCKDEP_ENTRIES too low!" message. @@ -1440,7 +1440,7 @@ config LOCKDEP_CHAINS_BITS config LOCKDEP_STACK_TRACE_BITS int "Bitsize for MAX_STACK_TRACE_ENTRIES" depends on LOCKDEP && !LOCKDEP_SMALL - range 10 30 + range 10 26 default 19 help Try increasing this value if you hit "BUG: MAX_STACK_TRACE_ENTRIES too low!" message. @@ -1448,7 +1448,7 @@ config LOCKDEP_STACK_TRACE_BITS config LOCKDEP_STACK_TRACE_HASH_BITS int "Bitsize for STACK_TRACE_HASH_SIZE" depends on LOCKDEP && !LOCKDEP_SMALL - range 10 30 + range 10 26 default 14 help Try increasing this value if you need large MAX_STACK_TRACE_ENTRIES. 
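A brief aside on the cmp_entries_dup() change in kernel/trace/tracing_map.c above: sort() expects a comparator that returns a signed three-way result (negative/zero/positive), so forwarding memcmp() of the keys yields a consistent ordering, whereas the old code collapsed every mismatch to 1. The userspace sketch below illustrates only the comparator pattern; the sort_entry struct and KEY_SIZE are assumptions made for the example, not the kernel's tracing_map_sort_entry layout.

/* Illustrative userspace sketch only -- not part of the patch above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KEY_SIZE 8

struct sort_entry {
	char key[KEY_SIZE];
};

/* Three-way comparison, as qsort() (and the kernel's sort()) require. */
static int cmp_entries_dup(const void *A, const void *B)
{
	const struct sort_entry *a = A;
	const struct sort_entry *b = B;

	return memcmp(a->key, b->key, KEY_SIZE);
}

int main(void)
{
	struct sort_entry entries[] = {
		{ .key = "bbbbbbb" },
		{ .key = "aaaaaaa" },
		{ .key = "aaaaaaa" },	/* duplicate key, compares equal */
	};

	qsort(entries, 3, sizeof(entries[0]), cmp_entries_dup);

	for (int i = 0; i < 3; i++)
		printf("%s\n", entries[i].key);

	return 0;
}

A comparator that only ever returns 0 or 1 is not antisymmetric and therefore not a valid ordering for a sort routine, which is what the hunk above corrects.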
@@ -1456,7 +1456,7 @@ config LOCKDEP_STACK_TRACE_HASH_BITS config LOCKDEP_CIRCULAR_QUEUE_BITS int "Bitsize for elements in circular_queue struct" depends on LOCKDEP - range 10 30 + range 10 26 default 12 help Try increasing this value if you hit "lockdep bfs error:-1" warning due to __cq_enqueue() failure. diff --git a/lib/buildid.c b/lib/buildid.c index e41fb0ee405f..391382bd0541 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -5,6 +5,7 @@ #include #include #include +#include #define BUILD_ID 3 @@ -40,7 +41,7 @@ static int parse_build_id_buf(unsigned char *build_id, name_sz == note_name_sz && memcmp(nhdr + 1, note_name, note_name_sz) == 0 && desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) { - data = note_start + note_off + ALIGN(note_name_sz, 4); + data = note_start + note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4); memcpy(build_id, data, desc_sz); memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz); if (size) @@ -157,6 +158,10 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, if (!vma->vm_file) return -EINVAL; + /* reject secretmem folios created with memfd_secret() */ + if (vma_is_secretmem(vma)) + return -EFAULT; + page = find_get_page(vma->vm_file->f_mapping, 0); if (!page) return -EFAULT; /* page not mapped */ diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index fde0aa244148..a75a9ca46b59 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c @@ -116,7 +116,7 @@ EXPORT_SYMBOL(dql_completed); void dql_reset(struct dql *dql) { /* Reset all dynamic values */ - dql->limit = 0; + dql->limit = dql->min_limit; dql->num_queued = 0; dql->num_completed = 0; dql->last_obj_cnt = 0; diff --git a/lib/lzo/Makefile b/lib/lzo/Makefile index 2f58fafbbddd..fc7b2b7ef4b2 100644 --- a/lib/lzo/Makefile +++ b/lib/lzo/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -lzo_compress-objs := lzo1x_compress.o +lzo_compress-objs := lzo1x_compress.o lzo1x_compress_safe.o lzo_decompress-objs := lzo1x_decompress_safe.o obj-$(CONFIG_LZO_COMPRESS) += lzo_compress.o diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c index 9d31e7126606..f00dff9b9d4e 100644 --- a/lib/lzo/lzo1x_compress.c +++ b/lib/lzo/lzo1x_compress.c @@ -18,11 +18,22 @@ #include #include "lzodefs.h" -static noinline size_t -lzo1x_1_do_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - size_t ti, void *wrkmem, signed char *state_offset, - const unsigned char bitstream_version) +#undef LZO_UNSAFE + +#ifndef LZO_SAFE +#define LZO_UNSAFE 1 +#define LZO_SAFE(name) name +#define HAVE_OP(x) 1 +#endif + +#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun + +static noinline int +LZO_SAFE(lzo1x_1_do_compress)(const unsigned char *in, size_t in_len, + unsigned char **out, unsigned char *op_end, + size_t *tp, void *wrkmem, + signed char *state_offset, + const unsigned char bitstream_version) { const unsigned char *ip; unsigned char *op; @@ -30,8 +41,9 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len, const unsigned char * const ip_end = in + in_len - 20; const unsigned char *ii; lzo_dict_t * const dict = (lzo_dict_t *) wrkmem; + size_t ti = *tp; - op = out; + op = *out; ip = in; ii = ip; ip += ti < 4 ? 
4 - ti : 0; @@ -116,25 +128,32 @@ next: if (t != 0) { if (t <= 3) { op[*state_offset] |= t; + NEED_OP(4); COPY4(op, ii); op += t; } else if (t <= 16) { + NEED_OP(17); *op++ = (t - 3); COPY8(op, ii); COPY8(op + 8, ii + 8); op += t; } else { if (t <= 18) { + NEED_OP(1); *op++ = (t - 3); } else { size_t tt = t - 18; + NEED_OP(1); *op++ = 0; while (unlikely(tt > 255)) { tt -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = tt; } + NEED_OP(t); do { COPY8(op, ii); COPY8(op + 8, ii + 8); @@ -151,6 +170,7 @@ next: if (unlikely(run_length)) { ip += run_length; run_length -= MIN_ZERO_RUN_LENGTH; + NEED_OP(4); put_unaligned_le32((run_length << 21) | 0xfffc18 | (run_length & 0x7), op); op += 4; @@ -243,10 +263,12 @@ m_len_done: ip += m_len; if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) { m_off -= 1; + NEED_OP(2); *op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2)); *op++ = (m_off >> 3); } else if (m_off <= M3_MAX_OFFSET) { m_off -= 1; + NEED_OP(1); if (m_len <= M3_MAX_LEN) *op++ = (M3_MARKER | (m_len - 2)); else { @@ -254,14 +276,18 @@ m_len_done: *op++ = M3_MARKER | 0; while (unlikely(m_len > 255)) { m_len -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = (m_len); } + NEED_OP(2); *op++ = (m_off << 2); *op++ = (m_off >> 6); } else { m_off -= 0x4000; + NEED_OP(1); if (m_len <= M4_MAX_LEN) *op++ = (M4_MARKER | ((m_off >> 11) & 8) | (m_len - 2)); @@ -282,11 +308,14 @@ m_len_done: m_len -= M4_MAX_LEN; *op++ = (M4_MARKER | ((m_off >> 11) & 8)); while (unlikely(m_len > 255)) { + NEED_OP(1); m_len -= 255; *op++ = 0; } + NEED_OP(1); *op++ = (m_len); } + NEED_OP(2); *op++ = (m_off << 2); *op++ = (m_off >> 6); } @@ -295,14 +324,20 @@ finished_writing_instruction: ii = ip; goto next; } - *out_len = op - out; - return in_end - (ii - ti); + *out = op; + *tp = in_end - (ii - ti); + return LZO_E_OK; + +output_overrun: + return LZO_E_OUTPUT_OVERRUN; } -static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem, const unsigned char bitstream_version) +static int LZO_SAFE(lzogeneric1x_1_compress)( + const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem, const unsigned char bitstream_version) { + unsigned char * const op_end = out + *out_len; const unsigned char *ip = in; unsigned char *op = out; unsigned char *data_start; @@ -326,14 +361,18 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, while (l > 20) { size_t ll = min_t(size_t, l, m4_max_offset + 1); uintptr_t ll_end = (uintptr_t) ip + ll; + int err; + if ((ll_end + ((t + ll) >> 5)) <= ll_end) break; BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS); memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t)); - t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem, - &state_offset, bitstream_version); + err = LZO_SAFE(lzo1x_1_do_compress)( + ip, ll, &op, op_end, &t, wrkmem, + &state_offset, bitstream_version); + if (err != LZO_E_OK) + return err; ip += ll; - op += *out_len; l -= ll; } t += l; @@ -342,20 +381,26 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, const unsigned char *ii = in + in_len - t; if (op == data_start && t <= 238) { + NEED_OP(1); *op++ = (17 + t); } else if (t <= 3) { op[state_offset] |= t; } else if (t <= 18) { + NEED_OP(1); *op++ = (t - 3); } else { size_t tt = t - 18; + NEED_OP(1); *op++ = 0; while (tt > 255) { tt -= 255; + NEED_OP(1); *op++ = 0; } + NEED_OP(1); *op++ = tt; } + NEED_OP(t); if (t >= 16) do { COPY8(op, ii); COPY8(op + 8, ii + 8); @@ -368,31 
+413,38 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len, } while (--t > 0); } + NEED_OP(3); *op++ = M4_MARKER | 1; *op++ = 0; *op++ = 0; *out_len = op - out; return LZO_E_OK; + +output_overrun: + return LZO_E_OUTPUT_OVERRUN; } -int lzo1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem) +int LZO_SAFE(lzo1x_1_compress)(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) { - return lzogeneric1x_1_compress(in, in_len, out, out_len, wrkmem, 0); + return LZO_SAFE(lzogeneric1x_1_compress)( + in, in_len, out, out_len, wrkmem, 0); } -int lzorle1x_1_compress(const unsigned char *in, size_t in_len, - unsigned char *out, size_t *out_len, - void *wrkmem) +int LZO_SAFE(lzorle1x_1_compress)(const unsigned char *in, size_t in_len, + unsigned char *out, size_t *out_len, + void *wrkmem) { - return lzogeneric1x_1_compress(in, in_len, out, out_len, - wrkmem, LZO_VERSION); + return LZO_SAFE(lzogeneric1x_1_compress)( + in, in_len, out, out_len, wrkmem, LZO_VERSION); } -EXPORT_SYMBOL_GPL(lzo1x_1_compress); -EXPORT_SYMBOL_GPL(lzorle1x_1_compress); +EXPORT_SYMBOL_GPL(LZO_SAFE(lzo1x_1_compress)); +EXPORT_SYMBOL_GPL(LZO_SAFE(lzorle1x_1_compress)); +#ifndef LZO_UNSAFE MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LZO1X-1 Compressor"); +#endif diff --git a/lib/lzo/lzo1x_compress_safe.c b/lib/lzo/lzo1x_compress_safe.c new file mode 100644 index 000000000000..371c9f849492 --- /dev/null +++ b/lib/lzo/lzo1x_compress_safe.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * LZO1X Compressor from LZO + * + * Copyright (C) 1996-2012 Markus F.X.J. Oberhumer + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for Linux kernel use by: + * Nitin Gupta + * Richard Purdie + */ + +#define LZO_SAFE(name) name##_safe +#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) + +#include "lzo1x_compress.c" diff --git a/lib/maple_tree.c b/lib/maple_tree.c index ae8ae9470066..b5d216bdd3a5 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1890,11 +1890,11 @@ static inline int mab_no_null_split(struct maple_big_node *b_node, * Return: The first split location. The middle split is set in @mid_split. */ static inline int mab_calc_split(struct ma_state *mas, - struct maple_big_node *bn, unsigned char *mid_split, unsigned long min) + struct maple_big_node *bn, unsigned char *mid_split) { unsigned char b_end = bn->b_end; int split = b_end / 2; /* Assume equal split. */ - unsigned char slot_min, slot_count = mt_slots[bn->type]; + unsigned char slot_count = mt_slots[bn->type]; /* * To support gap tracking, all NULL entries are kept together and a node cannot @@ -1927,17 +1927,7 @@ static inline int mab_calc_split(struct ma_state *mas, split = b_end / 3; *mid_split = split * 2; } else { - slot_min = mt_min_slots[bn->type]; - *mid_split = 0; - /* - * Avoid having a range less than the slot count unless it - * causes one node to be deficient. - * NOTE: mt_min_slots is 1 based, b_end and split are zero. 
- */ - while (((bn->pivot[split] - min) < slot_count - 1) && - (split < slot_count - 1) && (b_end - split > slot_min)) - split++; } /* Avoid ending a node on a NULL entry */ @@ -2663,7 +2653,7 @@ static inline struct maple_enode static inline unsigned char mas_mab_to_node(struct ma_state *mas, struct maple_big_node *b_node, struct maple_enode **left, struct maple_enode **right, struct maple_enode **middle, - unsigned char *mid_split, unsigned long min) + unsigned char *mid_split) { unsigned char split = 0; unsigned char slot_count = mt_slots[b_node->type]; @@ -2676,7 +2666,7 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas, if (b_node->b_end < slot_count) { split = b_node->b_end; } else { - split = mab_calc_split(mas, b_node, mid_split, min); + split = mab_calc_split(mas, b_node, mid_split); *right = mas_new_ma_node(mas, b_node); } @@ -3075,7 +3065,7 @@ static int mas_spanning_rebalance(struct ma_state *mas, mast->bn->b_end--; mast->bn->type = mte_node_type(mast->orig_l->node); split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle, - &mid_split, mast->orig_l->min); + &mid_split); mast_set_split_parents(mast, left, middle, right, split, mid_split); mast_cp_to_nodes(mast, left, middle, right, split, mid_split); @@ -3590,7 +3580,7 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) if (mas_push_data(mas, height, &mast, false)) break; - split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); + split = mab_calc_split(mas, b_node, &mid_split); mast_split_data(&mast, mas, split); /* * Usually correct, mab_mas_cp in the above call overwrites @@ -3723,9 +3713,20 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry) return slot; } +/* + * mas_store_root() - Storing value into root. + * @mas: The maple state + * @entry: The entry to store. + * + * There is no root node now and we are storing a value into the root - this + * function either assigns the pointer or expands into a node. + */ static inline void mas_store_root(struct ma_state *mas, void *entry) { - if (likely((mas->last != 0) || (mas->index != 0))) + if (!entry) { + if (!mas->index) + rcu_assign_pointer(mas->tree->ma_root, NULL); + } else if (likely((mas->last != 0) || (mas->index != 0))) mas_root_expand(mas, entry); else if (((unsigned long) (entry) & 3) == 2) mas_root_expand(mas, entry); diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index b8556a2e7bb1..e499e13856fb 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -608,7 +608,6 @@ DEFINE_TEST_ALLOC(devm_kzalloc, devm_kfree, 1, 1, 0); static void overflow_allocation_test(struct kunit *test) { - const char device_name[] = "overflow-test"; struct device *dev; int count = 0; @@ -618,7 +617,7 @@ static void overflow_allocation_test(struct kunit *test) } while (0) /* Create dummy device for devm_kmalloc()-family tests. 
*/ - dev = root_device_register(device_name); + dev = root_device_register("overflow-test"); KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), "Cannot register test device\n"); diff --git a/lib/sg_split.c b/lib/sg_split.c index 60a0babebf2e..0f89aab5c671 100644 --- a/lib/sg_split.c +++ b/lib/sg_split.c @@ -88,8 +88,6 @@ static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits) if (!j) { out_sg->offset += split->skip_sg0; out_sg->length -= split->skip_sg0; - } else { - out_sg->offset = 0; } sg_dma_address(out_sg) = 0; sg_dma_len(out_sg) = 0; diff --git a/lib/stackinit_kunit.c b/lib/stackinit_kunit.c index 4591d6cf5e01..d5d4fc48d46d 100644 --- a/lib/stackinit_kunit.c +++ b/lib/stackinit_kunit.c @@ -199,6 +199,7 @@ static noinline void test_ ## name (struct kunit *test) \ static noinline DO_NOTHING_TYPE_ ## which(var_type) \ do_nothing_ ## name(var_type *ptr) \ { \ + OPTIMIZER_HIDE_VAR(ptr); \ /* Will always be true, but compiler doesn't know. */ \ if ((unsigned long)ptr > 0x2) \ return DO_NOTHING_RETURN_ ## which(ptr); \ diff --git a/lib/string.c b/lib/string.c index 3371d26a0e39..c4b8349ff058 100644 --- a/lib/string.c +++ b/lib/string.c @@ -181,6 +181,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count) if (count == 0 || WARN_ON_ONCE(count > INT_MAX)) return -E2BIG; +#ifndef CONFIG_DCACHE_WORD_ACCESS #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS /* * If src is unaligned, don't cross a page boundary, @@ -195,12 +196,14 @@ ssize_t strscpy(char *dest, const char *src, size_t count) /* If src or dest is unaligned, don't do word-at-a-time. */ if (((long) dest | (long) src) & (sizeof(long) - 1)) max = 0; +#endif #endif /* - * read_word_at_a_time() below may read uninitialized bytes after the - * trailing zero and use them in comparisons. Disable this optimization - * under KMSAN to prevent false positive reports. + * load_unaligned_zeropad() or read_word_at_a_time() below may read + * uninitialized bytes after the trailing zero and use them in + * comparisons. Disable this optimization under KMSAN to prevent + * false positive reports. */ if (IS_ENABLED(CONFIG_KMSAN)) max = 0; @@ -208,7 +211,11 @@ ssize_t strscpy(char *dest, const char *src, size_t count) while (max >= sizeof(unsigned long)) { unsigned long c, data; +#ifdef CONFIG_DCACHE_WORD_ACCESS + c = load_unaligned_zeropad(src+res); +#else c = read_word_at_a_time(src+res); +#endif if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); data = create_zero_mask(data); diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 230020a2e076..df3ecc167ccf 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -52,7 +52,7 @@ void string_get_size(u64 size, u64 blk_size, const enum string_size_units units, static const unsigned int rounding[] = { 500, 50, 5 }; int i = 0, j; u32 remainder = 0, sf_cap; - char tmp[8]; + char tmp[12]; const char *unit; tmp[0] = '\0'; diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c index 2062be1f2e80..f90f2b9842ec 100644 --- a/lib/test_ubsan.c +++ b/lib/test_ubsan.c @@ -35,18 +35,22 @@ static void test_ubsan_shift_out_of_bounds(void) static void test_ubsan_out_of_bounds(void) { - volatile int i = 4, j = 5, k = -1; - volatile char above[4] = { }; /* Protect surrounding memory. */ - volatile int arr[4]; - volatile char below[4] = { }; /* Protect surrounding memory. */ + int i = 4, j = 4, k = -1; + volatile struct { + char above[4]; /* Protect surrounding memory. */ + int arr[4]; + char below[4]; /* Protect surrounding memory. 
*/ + } data; - above[0] = below[0]; + OPTIMIZER_HIDE_VAR(i); + OPTIMIZER_HIDE_VAR(j); + OPTIMIZER_HIDE_VAR(k); UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "above"); - arr[j] = i; + data.arr[j] = i; UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "below"); - arr[k] = i; + data.arr[k] = i; } enum ubsan_test_enum { diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h index e939598aff94..cfb3ba80a642 100644 --- a/mm/damon/vaddr-test.h +++ b/mm/damon/vaddr-test.h @@ -292,6 +292,7 @@ static void damon_test_split_evenly(struct kunit *test) damon_test_split_evenly_fail(test, 0, 100, 0); damon_test_split_evenly_succ(test, 0, 100, 10); damon_test_split_evenly_succ(test, 5, 59, 5); + damon_test_split_evenly_succ(test, 0, 3, 2); damon_test_split_evenly_fail(test, 5, 6, 2); } diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 260f0b775bfa..708f281c1b6b 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -67,6 +67,7 @@ static int damon_va_evenly_split_region(struct damon_target *t, unsigned long sz_orig, sz_piece, orig_end; struct damon_region *n = NULL, *next; unsigned long start; + unsigned int i; if (!r || !nr_pieces) return -EINVAL; @@ -80,8 +81,7 @@ static int damon_va_evenly_split_region(struct damon_target *t, r->ar.end = r->ar.start + sz_piece; next = damon_next_region(r); - for (start = r->ar.end; start + sz_piece <= orig_end; - start += sz_piece) { + for (start = r->ar.end, i = 1; i < nr_pieces; start += sz_piece, i++) { n = damon_new_region(start, start + sz_piece); if (!n) return -ENOMEM; diff --git a/mm/filemap.c b/mm/filemap.c index dc23b1336a8b..6649a853dc5f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2266,6 +2266,7 @@ unsigned filemap_get_folios_contig(struct address_space *mapping, *start = folio->index + nr; goto out; } + xas_advance(&xas, folio_next_index(folio) - 1); continue; put_folio: folio_put(folio); @@ -2938,7 +2939,7 @@ static inline loff_t folio_seek_hole_data(struct xa_state *xas, if (ops->is_partially_uptodate(folio, offset, bsz) == seek_data) break; - start = (start + bsz) & ~(bsz - 1); + start = (start + bsz) & ~((u64)bsz - 1); offset += bsz; } while (offset < folio_size(folio)); unlock: diff --git a/mm/gup.c b/mm/gup.c index f4911ddd3070..37c55e61460e 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -51,7 +51,8 @@ static inline void sanity_check_pinned_pages(struct page **pages, struct page *page = *pages; struct folio *folio = page_folio(page); - if (!folio_test_anon(folio)) + if (is_zero_page(page) || + !folio_test_anon(folio)) continue; if (!folio_test_large(folio) || folio_test_hugetlb(folio)) VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page); @@ -128,6 +129,13 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) else if (flags & FOLL_PIN) { struct folio *folio; + /* + * Don't take a pin on the zero page - it's not going anywhere + * and it is used in a *lot* of places. 
+ */ + if (is_zero_page(page)) + return page_folio(page); + /* * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a * right zone, so fail and let the caller fall back to the slow @@ -177,6 +185,8 @@ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) { if (flags & FOLL_PIN) { + if (is_zero_folio(folio)) + return; node_stat_mod_folio(folio, NR_FOLL_PIN_RELEASED, refs); if (folio_test_large(folio)) atomic_sub(refs, folio_pincount_ptr(folio)); @@ -217,6 +227,13 @@ bool __must_check try_grab_page(struct page *page, unsigned int flags) if (flags & FOLL_GET) folio_ref_inc(folio); else if (flags & FOLL_PIN) { + /* + * Don't take a pin on the zero page - it's not going anywhere + * and it is used in a *lot* of places. + */ + if (is_zero_page(page)) + return true; + /* * Similar to try_grab_folio(): be sure to *also* * increment the normal page refcount field at least once, @@ -1864,8 +1881,8 @@ size_t fault_in_safe_writeable(const char __user *uaddr, size_t size) } while (start != end); mmap_read_unlock(mm); - if (size > (unsigned long)uaddr - start) - return size - ((unsigned long)uaddr - start); + if (size > start - (unsigned long)uaddr) + return size - (start - (unsigned long)uaddr); return 0; } EXPORT_SYMBOL(fault_in_safe_writeable); @@ -1944,14 +1961,14 @@ struct page *get_dump_page(unsigned long addr) /* * Returns the number of collected pages. Return value is always >= 0. */ -static unsigned long collect_longterm_unpinnable_pages( +static void collect_longterm_unpinnable_pages( struct list_head *movable_page_list, unsigned long nr_pages, struct page **pages) { - unsigned long i, collected = 0; struct folio *prev_folio = NULL; bool drain_allow = true; + unsigned long i; for (i = 0; i < nr_pages; i++) { struct folio *folio = page_folio(pages[i]); @@ -1963,8 +1980,6 @@ static unsigned long collect_longterm_unpinnable_pages( if (folio_is_longterm_pinnable(folio)) continue; - collected++; - if (folio_is_device_coherent(folio)) continue; @@ -1986,8 +2001,6 @@ static unsigned long collect_longterm_unpinnable_pages( NR_ISOLATED_ANON + folio_is_file_lru(folio), folio_nr_pages(folio)); } - - return collected; } /* @@ -2080,12 +2093,10 @@ err: static long check_and_migrate_movable_pages(unsigned long nr_pages, struct page **pages) { - unsigned long collected; LIST_HEAD(movable_page_list); - collected = collect_longterm_unpinnable_pages(&movable_page_list, - nr_pages, pages); - if (!collected) + collect_longterm_unpinnable_pages(&movable_page_list, nr_pages, pages); + if (list_empty(&movable_page_list)) return 0; return migrate_longterm_unpinnable_pages(&movable_page_list, nr_pages, @@ -3155,6 +3166,9 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast); * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for further details. + * + * Note that if a zero_page is amongst the returned pages, it will not have + * pins in it and unpin_user_page() will not remove pins from it. */ int pin_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages) @@ -3231,6 +3245,9 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast_only); * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for details. + * + * Note that if a zero_page is amongst the returned pages, it will not have + * pins in it and unpin_user_page*() will not remove pins from it. 
*/ long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, @@ -3266,6 +3283,9 @@ EXPORT_SYMBOL(pin_user_pages_remote); * * FOLL_PIN means that the pages must be released via unpin_user_page(). Please * see Documentation/core-api/pin_user_pages.rst for details. + * + * Note that if a zero_page is amongst the returned pages, it will not have + * pins in it and unpin_user_page*() will not remove pins from it. */ long pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, @@ -3288,6 +3308,9 @@ EXPORT_SYMBOL(pin_user_pages); * pin_user_pages_unlocked() is the FOLL_PIN variant of * get_user_pages_unlocked(). Behavior is the same, except that this one sets * FOLL_PIN and rejects FOLL_GET. + * + * Note that if a zero_page is amongst the returned pages, it will not have + * pins in it and unpin_user_page*() will not remove pins from it. */ long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags) diff --git a/mm/internal.h b/mm/internal.h index ac8795a89858..f6752fd3416b 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -52,6 +52,25 @@ struct folio_batch; void page_writeback_init(void); +/* + * This is a file-backed mapping, and is about to be memory mapped - invoke its + * mmap hook and safely handle error conditions. On error, VMA hooks will be + * mutated. + * + * @file: File which backs the mapping. + * @vma: VMA which we are mapping. + * + * Returns: 0 if success, error otherwise. + */ +int mmap_file(struct file *file, struct vm_area_struct *vma); + +/* + * If the VMA has a close hook then close it, and since closing it might leave + * it in an inconsistent state which makes the use of any hooks suspect, clear + * them down by installing dummy empty hooks. + */ +void vma_close(struct vm_area_struct *vma); + static inline void *folio_raw_mapping(struct folio *folio) { unsigned long mapping = (unsigned long)folio->mapping; diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 5d9ae80df495..6ad986c267b5 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -73,10 +73,18 @@ static int __init kasan_set_multi_shot(char *str) __setup("kasan_multi_shot", kasan_set_multi_shot); /* - * Used to suppress reports within kasan_disable/enable_current() critical - * sections, which are used for marking accesses to slab metadata. + * This function is used to check whether KASAN reports are suppressed for + * software KASAN modes via kasan_disable/enable_current() critical sections. + * + * This is done to avoid: + * 1. False-positive reports when accessing slab metadata, + * 2. Deadlocking when poisoned memory is accessed by the reporting code. + * + * Hardware Tag-Based KASAN instead relies on: + * For #1: Resetting tags via kasan_reset_tag(). + * For #2: Suppression of tag checks via CPU, see report_suppress_start/end(). */ -static bool report_suppressed(void) +static bool report_suppressed_sw(void) { #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) if (current->kasan_depth) @@ -85,6 +93,30 @@ static bool report_suppressed(void) return false; } +static void report_suppress_start(void) +{ +#ifdef CONFIG_KASAN_HW_TAGS + /* + * Disable preemption for the duration of printing a KASAN report, as + * hw_suppress_tag_checks_start() disables checks on the current CPU. 
+ */ + preempt_disable(); + hw_suppress_tag_checks_start(); +#else + kasan_disable_current(); +#endif +} + +static void report_suppress_stop(void) +{ +#ifdef CONFIG_KASAN_HW_TAGS + hw_suppress_tag_checks_stop(); + preempt_enable(); +#else + kasan_enable_current(); +#endif +} + /* * Used to avoid reporting more than one KASAN bug unless kasan_multi_shot * is enabled. Note that KASAN tests effectively enable kasan_multi_shot @@ -141,7 +173,7 @@ static void update_kunit_status(bool sync) static void update_kunit_status(bool sync) { } #endif -static DEFINE_SPINLOCK(report_lock); +static DEFINE_RAW_SPINLOCK(report_lock); static void start_report(unsigned long *flags, bool sync) { @@ -152,8 +184,8 @@ static void start_report(unsigned long *flags, bool sync) /* Do not allow LOCKDEP mangling KASAN reports. */ lockdep_off(); /* Make sure we don't end up in loop. */ - kasan_disable_current(); - spin_lock_irqsave(&report_lock, *flags); + report_suppress_start(); + raw_spin_lock_irqsave(&report_lock, *flags); pr_err("==================================================================\n"); } @@ -163,14 +195,14 @@ static void end_report(unsigned long *flags, void *addr) trace_error_report_end(ERROR_DETECTOR_KASAN, (unsigned long)addr); pr_err("==================================================================\n"); - spin_unlock_irqrestore(&report_lock, *flags); + raw_spin_unlock_irqrestore(&report_lock, *flags); if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) check_panic_on_warn("KASAN"); if (kasan_arg_fault == KASAN_ARG_FAULT_PANIC) panic("kasan.fault=panic set ...\n"); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); lockdep_on(); - kasan_enable_current(); + report_suppress_stop(); } static void print_error_description(struct kasan_report_info *info) @@ -439,9 +471,13 @@ void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_ty struct kasan_report_info info; /* - * Do not check report_suppressed(), as an invalid-free cannot be - * caused by accessing slab metadata and thus should not be - * suppressed by kasan_disable/enable_current() critical sections. + * Do not check report_suppressed_sw(), as an invalid-free cannot be + * caused by accessing poisoned memory and thus should not be suppressed + * by kasan_disable/enable_current() critical sections. + * + * Note that for Hardware Tag-Based KASAN, kasan_report_invalid_free() + * is triggered by explicit tag checks and not by the ones performed by + * the CPU. Thus, reporting invalid-free is not suppressed as well. */ if (unlikely(!report_enabled())) return; @@ -476,7 +512,7 @@ bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long irq_flags; struct kasan_report_info info; - if (unlikely(report_suppressed()) || unlikely(!report_enabled())) { + if (unlikely(report_suppressed_sw()) || unlikely(!report_enabled())) { ret = false; goto out; } @@ -508,8 +544,9 @@ void kasan_report_async(void) unsigned long flags; /* - * Do not check report_suppressed(), as kasan_disable/enable_current() - * critical sections do not affect Hardware Tag-Based KASAN. + * Do not check report_suppressed_sw(), as + * kasan_disable/enable_current() critical sections do not affect + * Hardware Tag-Based KASAN. 
*/ if (unlikely(!report_enabled())) return; diff --git a/mm/kfence/core.c b/mm/kfence/core.c index c597cfebb0e8..799d8503f35f 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -997,6 +998,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) * properties (e.g. reside in DMAable memory). */ if ((flags & GFP_ZONEMASK) || + ((flags & __GFP_THISNODE) && num_online_nodes() > 1) || (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) { atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]); return NULL; diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 646e2979641f..9430b2e283ca 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1520,7 +1520,7 @@ static void kmemleak_scan(void) unsigned long phys = object->pointer; if (PHYS_PFN(phys) < min_low_pfn || - PHYS_PFN(phys + object->size) >= max_low_pfn) + PHYS_PFN(phys + object->size) > max_low_pfn) __paint_it(object, KMEMLEAK_BLACK); } diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c index ec0da72e65aa..091a263e053f 100644 --- a/mm/kmsan/hooks.c +++ b/mm/kmsan/hooks.c @@ -346,6 +346,7 @@ void kmsan_handle_dma(struct page *page, size_t offset, size_t size, size -= to_go; } } +EXPORT_SYMBOL_GPL(kmsan_handle_dma); void kmsan_handle_dma_sg(struct scatterlist *sg, int nents, enum dma_data_direction dir) diff --git a/mm/madvise.c b/mm/madvise.c index 5973399b2f9b..e1993e18afee 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -879,7 +879,16 @@ static long madvise_dontneed_free(struct vm_area_struct *vma, */ end = vma->vm_end; } - VM_WARN_ON(start >= end); + /* + * If the memory region between start and end was + * originally backed by 4kB pages and then remapped to + * be backed by hugepages while mmap_lock was dropped, + * the adjustment for hugetlb vma above may have rounded + * end down to the start address. + */ + if (start == end) + return 0; + VM_WARN_ON(start > end); } if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 280bb6969c0b..420efa1d2b20 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1250,8 +1250,11 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, struct task_struct *task; css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); - while (!ret && (task = css_task_iter_next(&it))) + while (!ret && (task = css_task_iter_next(&it))) { ret = fn(task, arg); + /* Avoid potential softlockup warning */ + cond_resched(); + } css_task_iter_end(&it); if (ret) { mem_cgroup_iter_break(memcg, iter); diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 56b2dcc2c0d6..9ca2b58aceec 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -764,12 +764,17 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn, mmap_read_lock(p->mm); ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops, (void *)&priv); + /* + * ret = 1 when CMCI wins, regardless of whether try_to_unmap() + * succeeds or fails, then kill the process with SIGBUS. + * ret = 0 when poison page is a clean page and it's dropped, no + * SIGBUS is needed. + */ if (ret == 1 && priv.tk.addr) kill_proc(&priv.tk, pfn, flags); - else - ret = 0; mmap_read_unlock(p->mm); - return ret > 0 ? -EHWPOISON : -EFAULT; + + return ret > 0 ? 
-EHWPOISON : 0; } static const char *action_name[] = { diff --git a/mm/memory.c b/mm/memory.c index da9fed5e6025..454d918449b3 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2659,11 +2659,11 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, if (fn) { do { if (create || !pte_none(*pte)) { - err = fn(pte++, addr, data); + err = fn(pte, addr, data); if (err) break; } - } while (addr += PAGE_SIZE, addr != end); + } while (pte++, addr += PAGE_SIZE, addr != end); } *mask |= PGTBL_PTE_MODIFIED; @@ -2802,8 +2802,10 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr, next = pgd_addr_end(addr, end); if (pgd_none(*pgd) && !create) continue; - if (WARN_ON_ONCE(pgd_leaf(*pgd))) - return -EINVAL; + if (WARN_ON_ONCE(pgd_leaf(*pgd))) { + err = -EINVAL; + break; + } if (!pgd_none(*pgd) && WARN_ON_ONCE(pgd_bad(*pgd))) { if (!create) continue; @@ -5323,7 +5325,7 @@ static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs } if (regs && !user_mode(regs)) { - unsigned long ip = instruction_pointer(regs); + unsigned long ip = exception_ip(regs); if (!search_exception_tables(ip)) return false; } @@ -5348,7 +5350,7 @@ static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_r { mmap_read_unlock(mm); if (regs && !user_mode(regs)) { - unsigned long ip = instruction_pointer(regs); + unsigned long ip = exception_ip(regs); if (!search_exception_tables(ip)) return false; } @@ -5846,10 +5848,8 @@ void __might_fault(const char *file, int line) if (pagefault_disabled()) return; __might_sleep(file, line); -#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) if (current->mm) might_lock_read(&current->mm->mmap_lock); -#endif } EXPORT_SYMBOL(__might_fault); #endif diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index dc17618bad8b..c8cc2f63c3ea 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1656,8 +1656,12 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (PageHWPoison(page)) { if (WARN_ON(folio_test_lru(folio))) folio_isolate_lru(folio); - if (folio_mapped(folio)) + if (folio_mapped(folio)) { + folio_lock(folio); try_to_unmap(folio, TTU_IGNORE_MLOCK); + folio_unlock(folio); + } + continue; } diff --git a/mm/migrate.c b/mm/migrate.c index 209078154a46..7b986c9f4032 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -420,19 +420,17 @@ int folio_migrate_mapping(struct address_space *mapping, newfolio->index = folio->index; newfolio->mapping = folio->mapping; folio_ref_add(newfolio, nr); /* add cache reference */ - if (folio_test_swapbacked(folio)) { + if (folio_test_swapbacked(folio)) __folio_set_swapbacked(newfolio); - if (folio_test_swapcache(folio)) { - int i; + if (folio_test_swapcache(folio)) { + int i; - folio_set_swapcache(newfolio); - for (i = 0; i < nr; i++) - set_page_private(folio_page(newfolio, i), - page_private(folio_page(folio, i))); - } + folio_set_swapcache(newfolio); + for (i = 0; i < nr; i++) + set_page_private(folio_page(newfolio, i), + page_private(folio_page(folio, i))); entries = nr; } else { - VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio); entries = 1; } @@ -2424,6 +2422,14 @@ static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) if (managed_zone(pgdat->node_zones + z)) break; } + + /* + * If there are no managed zones, it should not proceed + * further. 
+ */ + if (z < 0) + return 0; + wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE); return 0; } diff --git a/mm/mmap.c b/mm/mmap.c index c0f9575493de..ebc3583fa612 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -136,8 +136,7 @@ void unlink_file_vma(struct vm_area_struct *vma) static void remove_vma(struct vm_area_struct *vma) { might_sleep(); - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); + vma_close(vma); if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); @@ -1317,7 +1316,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, * to. we assume access permissions have been handled by the open * of the memory object, so we don't do any here. */ - vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | + vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; if (flags & MAP_LOCKED) @@ -2388,8 +2387,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, new->vm_start = new->vm_end; new->vm_pgoff = 0; /* Clean everything up if vma_adjust failed. */ - if (new->vm_ops && new->vm_ops->close) - new->vm_ops->close(new); + vma_close(new); if (new->vm_file) fput(new->vm_file); unlink_anon_vmas(new); @@ -2654,7 +2652,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, return do_mas_munmap(&mas, mm, start, len, uf, false); } -unsigned long mmap_region(struct file *file, unsigned long addr, +static unsigned long __mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf) { @@ -2752,30 +2750,32 @@ cannot_expand: vma->vm_page_prot = vm_get_page_prot(vm_flags); vma->vm_pgoff = pgoff; - if (file) { - if (vm_flags & VM_SHARED) { - error = mapping_map_writable(file->f_mapping); - if (error) - goto free_vma; - } + if (mas_preallocate(&mas, vma, GFP_KERNEL)) { + error = -ENOMEM; + goto free_vma; + } + if (file) { vma->vm_file = get_file(file); - error = call_mmap(file, vma); + error = mmap_file(file, vma); if (error) - goto unmap_and_free_vma; + goto unmap_and_free_file_vma; + + /* Drivers cannot alter the address of the VMA. */ + WARN_ON_ONCE(addr != vma->vm_start); /* - * Expansion is handled above, merging is handled below. - * Drivers should not alter the address of the VMA. + * Drivers should not permit writability when previously it was + * disallowed. */ - if (WARN_ON((addr != vma->vm_start))) { - error = -EINVAL; - goto close_and_free_vma; - } + VM_WARN_ON_ONCE(vm_flags != vma->vm_flags && + !(vm_flags & VM_MAYWRITE) && + (vma->vm_flags & VM_MAYWRITE)); + mas_reset(&mas); /* - * If vm_flags changed after call_mmap(), we should try merge + * If vm_flags changed after mmap_file(), we should try merge * vma again as we may succeed this time. */ if (unlikely(vm_flags != vma->vm_flags && prev)) { @@ -2794,7 +2794,8 @@ cannot_expand: vma = merge; /* Update vm_flags to pick up the change. 
*/ vm_flags = vma->vm_flags; - goto unmap_writable; + mas_destroy(&mas); + goto file_expanded; } } @@ -2802,31 +2803,15 @@ cannot_expand: } else if (vm_flags & VM_SHARED) { error = shmem_zero_setup(vma); if (error) - goto free_vma; + goto free_iter_vma; } else { vma_set_anonymous(vma); } - /* Allow architectures to sanity-check the vm_flags */ - if (!arch_validate_flags(vma->vm_flags)) { - error = -EINVAL; - if (file) - goto close_and_free_vma; - else if (vma->vm_file) - goto unmap_and_free_vma; - else - goto free_vma; - } - - if (mas_preallocate(&mas, vma, GFP_KERNEL)) { - error = -ENOMEM; - if (file) - goto close_and_free_vma; - else if (vma->vm_file) - goto unmap_and_free_vma; - else - goto free_vma; - } +#ifdef CONFIG_SPARC64 + /* TODO: Fix SPARC ADI! */ + WARN_ON_ONCE(!arch_validate_flags(vm_flags)); +#endif if (vma->vm_file) i_mmap_lock_write(vma->vm_file->f_mapping); @@ -2849,10 +2834,7 @@ cannot_expand: */ khugepaged_enter_vma(vma, vma->vm_flags); - /* Once vma denies write, undo our temporary denial count */ -unmap_writable: - if (file && vm_flags & VM_SHARED) - mapping_unmap_writable(file->f_mapping); +file_expanded: file = vma->vm_file; expanded: perf_event_mmap(vma); @@ -2881,29 +2863,54 @@ expanded: vma_set_page_prot(vma); - validate_mm(mm); return addr; -close_and_free_vma: - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); -unmap_and_free_vma: +unmap_and_free_file_vma: fput(vma->vm_file); vma->vm_file = NULL; /* Undo any partial mapping done by a device driver. */ unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end); - if (file && (vm_flags & VM_SHARED)) - mapping_unmap_writable(file->f_mapping); +free_iter_vma: + mas_destroy(&mas); free_vma: vm_area_free(vma); unacct_error: if (charged) vm_unacct_memory(charged); - validate_mm(mm); return error; } +unsigned long mmap_region(struct file *file, unsigned long addr, + unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, + struct list_head *uf) +{ + unsigned long ret; + bool writable_file_mapping = false; + + /* Allow architectures to sanity-check the vm_flags. */ + if (!arch_validate_flags(vm_flags)) + return -EINVAL; + + /* Map writable and ensure this isn't a sealed memfd. */ + if (file && (vm_flags & VM_SHARED)) { + int error = mapping_map_writable(file->f_mapping); + + if (error) + return error; + writable_file_mapping = true; + } + + ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf); + + /* Clear our write mapping regardless of error. 
*/ + if (writable_file_mapping) + mapping_unmap_writable(file->f_mapping); + + validate_mm(current->mm); + return ret; +} + static int __vm_munmap(unsigned long start, size_t len, bool downgrade) { int ret; @@ -3014,8 +3021,12 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, flags |= MAP_LOCKED; file = get_file(vma->vm_file); + ret = security_mmap_file(vma->vm_file, prot, flags); + if (ret) + goto out_fput; ret = do_mmap(vma->vm_file, start, size, prot, flags, pgoff, &populate, NULL); +out_fput: fput(file); out: mmap_write_unlock(mm); @@ -3376,8 +3387,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, return new_vma; out_vma_link: - if (new_vma->vm_ops && new_vma->vm_ops->close) - new_vma->vm_ops->close(new_vma); + vma_close(new_vma); if (new_vma->vm_file) fput(new_vma->vm_file); diff --git a/mm/nommu.c b/mm/nommu.c index 8e8fe491d914..6f4025d1b52b 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -357,6 +357,13 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, } EXPORT_SYMBOL(vm_insert_page); +int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num) +{ + return -EINVAL; +} +EXPORT_SYMBOL(vm_insert_pages); + int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num) { @@ -650,8 +657,7 @@ static int delete_vma_from_mm(struct vm_area_struct *vma) */ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) { - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); + vma_close(vma); if (vma->vm_file) fput(vma->vm_file); put_nommu_region(vma->vm_region); @@ -904,7 +910,7 @@ static unsigned long determine_vm_flags(struct file *file, { unsigned long vm_flags; - vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags); + vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(file, flags); /* vm_flags |= mm->def_flags; */ if (!(capabilities & NOMMU_MAP_DIRECT)) { @@ -939,7 +945,7 @@ static int do_mmap_shared_file(struct vm_area_struct *vma) { int ret; - ret = call_mmap(vma->vm_file, vma); + ret = mmap_file(vma->vm_file, vma); if (ret == 0) { vma->vm_region->vm_top = vma->vm_region->vm_end; return 0; @@ -970,7 +976,7 @@ static int do_mmap_private(struct vm_area_struct *vma, * - VM_MAYSHARE will be set if it may attempt to share */ if (capabilities & NOMMU_MAP_DIRECT) { - ret = call_mmap(vma->vm_file, vma); + ret = mmap_file(vma->vm_file, vma); if (ret == 0) { /* shouldn't return success if we're not sharing */ BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 1276e49b31b0..f4c8ef863ea7 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -44,6 +44,8 @@ #include #include #include +#include +#include #include #include "internal.h" @@ -429,10 +431,15 @@ static void dump_tasks(struct oom_control *oc) mem_cgroup_scan_tasks(oc->memcg, dump_task, oc); else { struct task_struct *p; + int i = 0; rcu_read_lock(); - for_each_process(p) + for_each_process(p) { + /* Avoid potential softlockup warning */ + if ((++i & 1023) == 0) + touch_softlockup_watchdog(); dump_task(p, oc); + } rcu_read_unlock(); } } @@ -757,6 +764,7 @@ static inline void queue_oom_reaper(struct task_struct *tsk) */ static void mark_oom_victim(struct task_struct *tsk) { + const struct cred *cred; struct mm_struct *mm = tsk->mm; WARN_ON(oom_killer_disabled); @@ -776,7 +784,9 @@ static void mark_oom_victim(struct task_struct *tsk) */ __thaw_task(tsk); atomic_inc(&oom_victims); - trace_mark_victim(tsk->pid); + cred = 
get_task_cred(tsk); + trace_mark_victim(tsk, cred->uid.val); + put_cred(cred); } /** diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e463adea6504..56ca81c5ef1b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1388,12 +1388,27 @@ static __always_inline bool free_pages_prepare(struct page *page, int bad = 0; bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags); bool init = want_init_on_free(); + struct folio *folio = page_folio(page); VM_BUG_ON_PAGE(PageTail(page), page); trace_mm_page_free(page, order); kmsan_free_page(page, order); + /* + * In rare cases, when truncation or holepunching raced with + * munlock after VM_LOCKED was cleared, Mlocked may still be + * found set here. This does not indicate a problem, unless + * "unevictable_pgs_cleared" appears worryingly large. + */ + if (unlikely(folio_test_mlocked(folio))) { + long nr_pages = folio_nr_pages(folio); + + __folio_clear_mlocked(folio); + zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); + count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); + } + if (unlikely(PageHWPoison(page)) && !order) { /* * Do not let hwpoison pages hit pcplists/buddy @@ -5120,6 +5135,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, restart: compaction_retries = 0; no_progress_loops = 0; + compact_result = COMPACT_SKIPPED; compact_priority = DEF_COMPACT_PRIORITY; cpuset_mems_cookie = read_mems_allowed_begin(); zonelist_iter_cookie = zonelist_iter_begin(); @@ -5222,6 +5238,14 @@ restart: } retry: + /* + * Deal with possible cpuset update races or zonelist updates to avoid + * infinite retries. + */ + if (check_retry_cpuset(cpuset_mems_cookie, ac) || + check_retry_zonelist(zonelist_iter_cookie)) + goto restart; + /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */ if (alloc_flags & ALLOC_KSWAPD) wake_all_kswapds(order, gfp_mask, ac); @@ -5500,7 +5524,8 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, gfp = alloc_gfp; /* Find an allowed local zone that meets the low watermark. */ - for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { + z = ac.preferred_zoneref; + for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) { unsigned long mark; if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) && diff --git a/mm/readahead.c b/mm/readahead.c index a3ab74fcd60b..dae42d8cf37c 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -620,7 +620,11 @@ static void ondemand_readahead(struct readahead_control *ractl, 1UL << order); if (index == expected || index == (ra->start + ra->size)) { ra->start += ra->size; - ra->size = get_next_ra_size(ra, max_pages); + /* + * In the case of MADV_HUGEPAGE, the actual size might exceed + * the readahead window. + */ + ra->size = max(ra->size, get_next_ra_size(ra, max_pages)); ra->async_size = ra->size; goto readit; } diff --git a/mm/rmap.c b/mm/rmap.c index 7da2d8d097d9..7d7bacd37ff4 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2306,7 +2306,7 @@ static bool folio_make_device_exclusive(struct folio *folio, * Restrict to anonymous folios for now to avoid potential writeback * issues. 
*/ - if (!folio_test_anon(folio)) + if (!folio_test_anon(folio) || folio_test_hugetlb(folio)) return false; rmap_walk(folio, &rwc); diff --git a/mm/shmem.c b/mm/shmem.c index 0e1fbc53717d..2e6b7db7f14b 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1086,9 +1086,7 @@ static int shmem_getattr(struct user_namespace *mnt_userns, stat->attributes_mask |= (STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE | STATX_ATTR_NODUMP); - inode_lock_shared(inode); generic_fillattr(&init_user_ns, inode, stat); - inode_unlock_shared(inode); if (shmem_is_huge(NULL, inode, 0, false)) stat->blksize = HPAGE_PMD_SIZE; @@ -2308,9 +2306,6 @@ static int shmem_mmap(struct file *file, struct vm_area_struct *vma) if (ret) return ret; - /* arm64 - allow memory tagging on RAM-based files */ - vma->vm_flags |= VM_MTE_ALLOWED; - file_accessed(file); vma->vm_ops = &shmem_vm_ops; return 0; diff --git a/mm/swap.c b/mm/swap.c index 955930f41d20..85aa04fc48a6 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -88,14 +88,6 @@ static void __page_cache_release(struct folio *folio) __folio_clear_lru_flags(folio); unlock_page_lruvec_irqrestore(lruvec, flags); } - /* See comment on folio_test_mlocked in release_pages() */ - if (unlikely(folio_test_mlocked(folio))) { - long nr_pages = folio_nr_pages(folio); - - __folio_clear_mlocked(folio); - zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages); - count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); - } } static void __folio_put_small(struct folio *folio) @@ -1034,18 +1026,6 @@ void release_pages(struct page **pages, int nr) __folio_clear_lru_flags(folio); } - /* - * In rare cases, when truncation or holepunching raced with - * munlock after VM_LOCKED was cleared, Mlocked may still be - * found set here. This does not indicate a problem, unless - * "unevictable_pgs_cleared" appears worryingly large. - */ - if (unlikely(folio_test_mlocked(folio))) { - __folio_clear_mlocked(folio); - zone_stat_sub_folio(folio, NR_MLOCK); - count_vm_event(UNEVICTABLE_PGCLEARED); - } - list_add(&folio->lru, &pages_to_free); } if (lruvec) diff --git a/mm/util.c b/mm/util.c index 94fff247831b..d3a2877c176f 100644 --- a/mm/util.c +++ b/mm/util.c @@ -1103,6 +1103,39 @@ int __weak memcmp_pages(struct page *page1, struct page *page2) return ret; } +int mmap_file(struct file *file, struct vm_area_struct *vma) +{ + static const struct vm_operations_struct dummy_vm_ops = {}; + int err = call_mmap(file, vma); + + if (likely(!err)) + return 0; + + /* + * OK, we tried to call the file hook for mmap(), but an error + * arose. The mapping is in an inconsistent state and we must not invoke + * any further hooks on it. + */ + vma->vm_ops = &dummy_vm_ops; + + return err; +} + +void vma_close(struct vm_area_struct *vma) +{ + static const struct vm_operations_struct dummy_vm_ops = {}; + + if (vma->vm_ops && vma->vm_ops->close) { + vma->vm_ops->close(vma); + + /* + * The mapping is in an inconsistent state, and no further hooks + * may be invoked upon it. 
+ */ + vma->vm_ops = &dummy_vm_ops; + } +} + #ifdef CONFIG_PRINTK /** * mem_dump_obj - Print available provenance information diff --git a/mm/vmalloc.c b/mm/vmalloc.c index a0b650f50faa..562994159216 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -566,13 +566,13 @@ static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end, mask |= PGTBL_PGD_MODIFIED; err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask); if (err) - return err; + break; } while (pgd++, addr = next, addr != end); if (mask & ARCH_PAGE_TABLE_SYNC_MASK) arch_sync_kernel_mappings(start, end); - return 0; + return err; } /* @@ -2709,7 +2709,8 @@ static void __vunmap(const void *addr, int deallocate_pages) struct page *page = area->pages[i]; BUG_ON(!page); - mod_memcg_page_state(page, MEMCG_VMALLOC, -1); + if (!(area->flags & VM_MAP_PUT_PAGES)) + mod_memcg_page_state(page, MEMCG_VMALLOC, -1); /* * High-order allocs for huge vmallocs are split, so * can be freed as an array of order-0 allocations @@ -2717,7 +2718,8 @@ static void __vunmap(const void *addr, int deallocate_pages) __free_pages(page, 0); cond_resched(); } - atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); + if (!(area->flags & VM_MAP_PUT_PAGES)) + atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); kvfree(area->pages); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 4cd0cbf9c121..1f7a90ecc700 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -588,7 +588,14 @@ unsigned long zone_reclaimable_pages(struct zone *zone) if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL)) nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) + zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON); - + /* + * If there are no reclaimable file-backed or anonymous pages, + * ensure zones with sufficient free pages are not skipped. + * This prevents zones like DMA32 from being ignored in reclaim + * scenarios where they can still help alleviate memory pressure. + */ + if (nr == 0) + nr = zone_page_state_snapshot(zone, NR_FREE_PAGES); return nr; } @@ -7722,7 +7729,7 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) return NODE_RECLAIM_NOSCAN; ret = __node_reclaim(pgdat, gfp_mask, order); - clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); + clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags); if (!ret) count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); diff --git a/mm/vmstat.c b/mm/vmstat.c index b2371d745e00..c060d3f95b27 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1711,6 +1711,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat, zone_page_state(zone, i)); #ifdef CONFIG_NUMA + fold_vm_zone_numa_events(zone); for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++) seq_printf(m, "\n %-12s %lu", numa_stat_name(i), zone_numa_event_state(zone, i)); diff --git a/net/802/psnap.c b/net/802/psnap.c index 1406bfdbda13..dbd9647f2ef1 100644 --- a/net/802/psnap.c +++ b/net/802/psnap.c @@ -55,11 +55,11 @@ static int snap_rcv(struct sk_buff *skb, struct net_device *dev, goto drop; rcu_read_lock(); - proto = find_snap_client(skb_transport_header(skb)); + proto = find_snap_client(skb->data); if (proto) { /* Pass the frame on. 
*/ - skb->transport_header += 5; skb_pull_rcsum(skb, 5); + skb_reset_transport_header(skb); rc = proto->rcvfunc(skb, dev, &snap_packet_type, orig_dev); } rcu_read_unlock(); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index e40aa3e3641c..b477ba37a699 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -131,7 +131,8 @@ int vlan_check_real_dev(struct net_device *real_dev, { const char *name = real_dev->name; - if (real_dev->features & NETIF_F_VLAN_CHALLENGED) { + if (real_dev->features & NETIF_F_VLAN_CHALLENGED || + real_dev->type != ARPHRD_ETHER) { pr_info("VLANs not supported on %s\n", name); NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device"); return -EOPNOTSUPP; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index d3e511e1eba8..c08228d48834 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -272,17 +272,6 @@ static int vlan_dev_open(struct net_device *dev) goto out; } - if (dev->flags & IFF_ALLMULTI) { - err = dev_set_allmulti(real_dev, 1); - if (err < 0) - goto del_unicast; - } - if (dev->flags & IFF_PROMISC) { - err = dev_set_promiscuity(real_dev, 1); - if (err < 0) - goto clear_allmulti; - } - ether_addr_copy(vlan->real_dev_addr, real_dev->dev_addr); if (vlan->flags & VLAN_FLAG_GVRP) @@ -296,12 +285,6 @@ static int vlan_dev_open(struct net_device *dev) netif_carrier_on(dev); return 0; -clear_allmulti: - if (dev->flags & IFF_ALLMULTI) - dev_set_allmulti(real_dev, -1); -del_unicast: - if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) - dev_uc_del(real_dev, dev->dev_addr); out: netif_carrier_off(dev); return err; @@ -314,10 +297,6 @@ static int vlan_dev_stop(struct net_device *dev) dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); - if (dev->flags & IFF_ALLMULTI) - dev_set_allmulti(real_dev, -1); - if (dev->flags & IFF_PROMISC) - dev_set_promiscuity(real_dev, -1); if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); @@ -474,12 +453,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change) { struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; - if (dev->flags & IFF_UP) { - if (change & IFF_ALLMULTI) - dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); - if (change & IFF_PROMISC) - dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1); - } + if (change & IFF_ALLMULTI) + dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); + if (change & IFF_PROMISC) + dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 
1 : -1); } static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) diff --git a/net/9p/client.c b/net/9p/client.c index e876d6fea2fc..e89a91802e03 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -1545,7 +1545,8 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to, struct p9_client *clnt = fid->clnt; struct p9_req_t *req; int count = iov_iter_count(to); - int rsize, received, non_zc = 0; + u32 rsize, received; + bool non_zc = false; char *dataptr; *err = 0; @@ -1568,7 +1569,7 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to, 0, 11, "dqd", fid->fid, offset, rsize); } else { - non_zc = 1; + non_zc = true; req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset, rsize); } @@ -1589,11 +1590,11 @@ p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to, return 0; } if (rsize < received) { - pr_err("bogus RREAD count (%d > %d)\n", received, rsize); + pr_err("bogus RREAD count (%u > %u)\n", received, rsize); received = rsize; } - p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", received); + p9_debug(P9_DEBUG_9P, "<<< RREAD count %u\n", received); if (non_zc) { int n = copy_to_iter(dataptr, received, to); @@ -1620,9 +1621,9 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err) *err = 0; while (iov_iter_count(from)) { - int count = iov_iter_count(from); - int rsize = fid->iounit; - int written; + size_t count = iov_iter_count(from); + u32 rsize = fid->iounit; + u32 written; if (!rsize || rsize > clnt->msize - P9_IOHDRSZ) rsize = clnt->msize - P9_IOHDRSZ; @@ -1630,7 +1631,7 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err) if (count < rsize) rsize = count; - p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d (/%d)\n", + p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %u (/%zu)\n", fid->fid, offset, rsize, count); /* Don't bother zerocopy for small IO (< 1024) */ @@ -1656,11 +1657,11 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err) break; } if (rsize < written) { - pr_err("bogus RWRITE count (%d > %d)\n", written, rsize); + pr_err("bogus RWRITE count (%u > %u)\n", written, rsize); written = rsize; } - p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", written); + p9_debug(P9_DEBUG_9P, "<<< RWRITE count %u\n", written); p9_req_put(clnt, req); iov_iter_revert(from, count - written - iov_iter_count(from)); @@ -2056,7 +2057,8 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate); int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) { - int err, rsize, non_zc = 0; + int err, non_zc = 0; + u32 rsize; struct p9_client *clnt; struct p9_req_t *req; char *dataptr; @@ -2065,7 +2067,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) iov_iter_kvec(&to, ITER_DEST, &kv, 1, count); - p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", + p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %u\n", fid->fid, offset, count); err = 0; @@ -2101,11 +2103,11 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) goto free_and_error; } if (rsize < count) { - pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize); + pr_err("bogus RREADDIR count (%u > %u)\n", count, rsize); count = rsize; } - p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count); + p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %u\n", count); if (non_zc) memmove(data, dataptr, count); diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 68027e4fb421..4ad7e7a269ca 100644 --- 
a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -285,7 +285,7 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv) if (!priv->rings[i].intf) break; if (priv->rings[i].irq > 0) - unbind_from_irqhandler(priv->rings[i].irq, priv->dev); + unbind_from_irqhandler(priv->rings[i].irq, ring); if (priv->rings[i].data.in) { for (j = 0; j < (1 << priv->rings[i].intf->ring_order); @@ -465,6 +465,7 @@ static int xen_9pfs_front_init(struct xenbus_device *dev) goto error; } + xenbus_switch_state(dev, XenbusStateInitialised); return 0; error_xenbus: @@ -512,8 +513,10 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev, break; case XenbusStateInitWait: - if (!xen_9pfs_front_init(dev)) - xenbus_switch_state(dev, XenbusStateInitialised); + if (dev->state != XenbusStateInitialising) + break; + + xen_9pfs_front_init(dev); break; case XenbusStateConnected: diff --git a/net/Makefile b/net/Makefile index 0914bea9c335..103cd8d61f68 100644 --- a/net/Makefile +++ b/net/Makefile @@ -17,7 +17,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/ obj-$(CONFIG_INET) += ipv4/ obj-$(CONFIG_TLS) += tls/ obj-$(CONFIG_XFRM) += xfrm/ -obj-$(CONFIG_UNIX_SCM) += unix/ +obj-$(CONFIG_UNIX) += unix/ obj-y += ipv6/ obj-$(CONFIG_BPFILTER) += bpfilter/ obj-$(CONFIG_PACKET) += packet/ diff --git a/net/atm/lec.c b/net/atm/lec.c index 6257bf12e5a0..ac3cfc1ae510 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -181,6 +181,7 @@ static void lec_send(struct atm_vcc *vcc, struct sk_buff *skb) { struct net_device *dev = skb->dev; + unsigned int len = skb->len; ATM_SKB(skb)->vcc = vcc; atm_account_tx(vcc, skb); @@ -191,7 +192,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb) } dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += len; } static void lec_tx_timeout(struct net_device *dev, unsigned int txqueue) diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 033871e718a3..583c27131b7d 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c @@ -1314,6 +1314,8 @@ static void MPOA_cache_impos_rcvd(struct k_message *msg, holding_time = msg->content.eg_info.holding_time; dprintk("(%s) entry = %p, holding_time = %u\n", mpc->dev->name, entry, holding_time); + if (entry == NULL && !holding_time) + return; if (entry == NULL && holding_time) { entry = mpc->eg_ops->add_entry(msg, mpc); mpc->eg_ops->put(entry); diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index a1e0be871687..862e03493b7e 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -467,7 +467,7 @@ einval_put: goto out_put; } -static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev) +static void ax25_fillin_cb_from_dev(ax25_cb *ax25, const ax25_dev *ax25_dev) { ax25->rtt = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]) / 2; ax25->t1 = msecs_to_jiffies(ax25_dev->values[AX25_VALUES_T1]); @@ -677,22 +677,33 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, break; } - rtnl_lock(); - dev = __dev_get_by_name(&init_net, devname); + rcu_read_lock(); + dev = dev_get_by_name_rcu(&init_net, devname); if (!dev) { - rtnl_unlock(); + rcu_read_unlock(); res = -ENODEV; break; } + if (ax25->ax25_dev) { + if (dev == ax25->ax25_dev->dev) { + rcu_read_unlock(); + break; + } + netdev_put(ax25->ax25_dev->dev, &ax25->dev_tracker); + ax25_dev_put(ax25->ax25_dev); + } + ax25->ax25_dev = ax25_dev_ax25dev(dev); if (!ax25->ax25_dev) { - rtnl_unlock(); + rcu_read_unlock(); res = -ENODEV; break; } ax25_fillin_cb(ax25, ax25->ax25_dev); - rtnl_unlock(); + netdev_hold(dev, &ax25->dev_tracker, GFP_ATOMIC); + 
ax25_dev_hold(ax25->ax25_dev); + rcu_read_unlock(); break; default: diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index e165fe108bb0..2b4f8df53b76 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -87,7 +87,7 @@ void ax25_dev_device_up(struct net_device *dev) spin_lock_bh(&ax25_dev_lock); list_add(&ax25_dev->list, &ax25_dev_list); - dev->ax25_ptr = ax25_dev; + rcu_assign_pointer(dev->ax25_ptr, ax25_dev); spin_unlock_bh(&ax25_dev_lock); ax25_register_dev_sysctl(ax25_dev); @@ -122,7 +122,7 @@ void ax25_dev_device_down(struct net_device *dev) } } - dev->ax25_ptr = NULL; + RCU_INIT_POINTER(dev->ax25_ptr, NULL); spin_unlock_bh(&ax25_dev_lock); netdev_put(dev, &ax25_dev->dev_tracker); ax25_dev_put(ax25_dev); diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c index 36249776c021..215d4ccf12b9 100644 --- a/net/ax25/ax25_ip.c +++ b/net/ax25/ax25_ip.c @@ -122,6 +122,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) if (dev == NULL) dev = skb->dev; + rcu_read_lock(); if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) { kfree_skb(skb); goto put; @@ -202,7 +203,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb) ax25_queue_xmit(skb, dev); put: - + rcu_read_unlock(); ax25_route_lock_unuse(); return NETDEV_TX_OK; } diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c index 3db76d2470e9..8bca2ace98e5 100644 --- a/net/ax25/ax25_out.c +++ b/net/ax25/ax25_out.c @@ -39,10 +39,14 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *sr * specified. */ if (paclen == 0) { - if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) + rcu_read_lock(); + ax25_dev = ax25_dev_ax25dev(dev); + if (!ax25_dev) { + rcu_read_unlock(); return NULL; - + } paclen = ax25_dev->values[AX25_VALUES_PACLEN]; + rcu_read_unlock(); } /* @@ -53,13 +57,19 @@ ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, const ax25_address *sr return ax25; /* It already existed */ } - if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) + rcu_read_lock(); + ax25_dev = ax25_dev_ax25dev(dev); + if (!ax25_dev) { + rcu_read_unlock(); return NULL; + } - if ((ax25 = ax25_create_cb()) == NULL) + if ((ax25 = ax25_create_cb()) == NULL) { + rcu_read_unlock(); return NULL; - + } ax25_fillin_cb(ax25, ax25_dev); + rcu_read_unlock(); ax25->source_addr = *src; ax25->dest_addr = *dest; @@ -358,7 +368,9 @@ void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned char *ptr; + rcu_read_lock(); skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev)); + rcu_read_unlock(); ptr = skb_push(skb, 1); *ptr = 0x00; /* KISS */ diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c index b7c4d656a94b..69de75db0c9c 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ -406,6 +406,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) ax25_route_lock_unuse(); return -EHOSTUNREACH; } + rcu_read_lock(); if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) { err = -EHOSTUNREACH; goto put; @@ -442,6 +443,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr) } put: + rcu_read_unlock(); ax25_route_lock_unuse(); return err; } diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 7f6a7c96ac92..02e084b44053 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -325,8 +325,7 @@ batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, /* check if there is enough space for the optional TVLV */ next_buff_pos += ntohs(ogm_packet->tvlv_len); - return (next_buff_pos <= packet_len) && - (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); + return 
next_buff_pos <= packet_len; } /* send a batman ogm to a given interface */ diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index 54e41fc709c3..651e01b86141 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c @@ -113,8 +113,6 @@ static void batadv_v_hardif_neigh_init(struct batadv_hardif_neigh_node *hardif_neigh) { ewma_throughput_init(&hardif_neigh->bat_v.throughput); - INIT_WORK(&hardif_neigh->bat_v.metric_work, - batadv_v_elp_throughput_metric_update); } /** diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index 98a624f32b94..6dbb4266b558 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -27,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -42,6 +44,18 @@ #include "routing.h" #include "send.h" +/** + * struct batadv_v_metric_queue_entry - list of hardif neighbors which require + * a metric update + */ +struct batadv_v_metric_queue_entry { + /** @hardif_neigh: hardif neighbor scheduled for metric update */ + struct batadv_hardif_neigh_node *hardif_neigh; + + /** @list: list node for metric_queue */ + struct list_head list; +}; + /** * batadv_v_elp_start_timer() - restart timer for ELP periodic work * @hard_iface: the interface for which the timer has to be reset @@ -60,25 +74,36 @@ static void batadv_v_elp_start_timer(struct batadv_hard_iface *hard_iface) /** * batadv_v_elp_get_throughput() - get the throughput towards a neighbour * @neigh: the neighbour for which the throughput has to be obtained + * @pthroughput: calculated throughput towards the given neighbour in multiples + * of 100kbps (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc). + * - * Return: The throughput towards the given neighbour in multiples of 100kpbs - * (a value of '1' equals 0.1Mbps, '10' equals 1Mbps, etc). + * Return: true when value behind @pthroughput was set */ -static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) +static bool batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh, + u32 *pthroughput) { struct batadv_hard_iface *hard_iface = neigh->if_incoming; + struct net_device *soft_iface = hard_iface->soft_iface; struct ethtool_link_ksettings link_settings; struct net_device *real_netdev; struct station_info sinfo; u32 throughput; int ret; + /* don't query throughput when no longer associated with any + * batman-adv interface + */ + if (!soft_iface) + return false; + /* if the user specified a customised value for this interface, then * return it directly */ throughput = atomic_read(&hard_iface->bat_v.throughput_override); - if (throughput != 0) - return throughput; + if (throughput != 0) { + *pthroughput = throughput; + return true; + } /* if this is a wireless device, then ask its throughput through * cfg80211 API @@ -105,27 +130,39 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) * possible to delete this neighbor. For now set * the throughput metric to 0.
*/ - return 0; + *pthroughput = 0; + return true; } if (ret) goto default_throughput; - if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT)) - return sinfo.expected_throughput / 100; + if (sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT)) { + *pthroughput = sinfo.expected_throughput / 100; + return true; + } /* try to estimate the expected throughput based on reported tx * rates */ - if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE)) - return cfg80211_calculate_bitrate(&sinfo.txrate) / 3; + if (sinfo.filled & BIT(NL80211_STA_INFO_TX_BITRATE)) { + *pthroughput = cfg80211_calculate_bitrate(&sinfo.txrate) / 3; + return true; + } goto default_throughput; } + /* only use rtnl_trylock because the elp worker will be cancelled while + * the rtnl_lock is held. the cancel_delayed_work_sync() would otherwise + * wait forever when the elp work_item was started and it is then also + * trying to rtnl_lock + */ + if (!rtnl_trylock()) + return false; + /* if not a wifi interface, check if this device provides data via * ethtool (e.g. an Ethernet adapter) */ - rtnl_lock(); ret = __ethtool_get_link_ksettings(hard_iface->net_dev, &link_settings); rtnl_unlock(); if (ret == 0) { @@ -136,13 +173,15 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) hard_iface->bat_v.flags &= ~BATADV_FULL_DUPLEX; throughput = link_settings.base.speed; - if (throughput && throughput != SPEED_UNKNOWN) - return throughput * 10; + if (throughput && throughput != SPEED_UNKNOWN) { + *pthroughput = throughput * 10; + return true; + } } default_throughput: if (!(hard_iface->bat_v.flags & BATADV_WARNING_DEFAULT)) { - batadv_info(hard_iface->soft_iface, + batadv_info(soft_iface, "WiFi driver or ethtool info does not provide information about link speeds on interface %s, therefore defaulting to hardcoded throughput values of %u.%1u Mbps.
Consider overriding the throughput manually or checking your driver.\n", hard_iface->net_dev->name, BATADV_THROUGHPUT_DEFAULT_VALUE / 10, @@ -151,31 +190,26 @@ default_throughput: } /* if none of the above cases apply, return the base_throughput */ - return BATADV_THROUGHPUT_DEFAULT_VALUE; + *pthroughput = BATADV_THROUGHPUT_DEFAULT_VALUE; + return true; } /** * batadv_v_elp_throughput_metric_update() - worker updating the throughput * metric of a single hop neighbour - * @work: the work queue item + * @neigh: the neighbour to probe */ -void batadv_v_elp_throughput_metric_update(struct work_struct *work) +static void +batadv_v_elp_throughput_metric_update(struct batadv_hardif_neigh_node *neigh) { - struct batadv_hardif_neigh_node_bat_v *neigh_bat_v; - struct batadv_hardif_neigh_node *neigh; + u32 throughput; + bool valid; - neigh_bat_v = container_of(work, struct batadv_hardif_neigh_node_bat_v, - metric_work); - neigh = container_of(neigh_bat_v, struct batadv_hardif_neigh_node, - bat_v); + valid = batadv_v_elp_get_throughput(neigh, &throughput); + if (!valid) + return; - ewma_throughput_add(&neigh->bat_v.throughput, - batadv_v_elp_get_throughput(neigh)); - - /* decrement refcounter to balance increment performed before scheduling - * this task - */ - batadv_hardif_neigh_put(neigh); + ewma_throughput_add(&neigh->bat_v.throughput, throughput); } /** @@ -249,14 +283,16 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh) */ static void batadv_v_elp_periodic_work(struct work_struct *work) { + struct batadv_v_metric_queue_entry *metric_entry; + struct batadv_v_metric_queue_entry *metric_safe; struct batadv_hardif_neigh_node *hardif_neigh; struct batadv_hard_iface *hard_iface; struct batadv_hard_iface_bat_v *bat_v; struct batadv_elp_packet *elp_packet; + struct list_head metric_queue; struct batadv_priv *bat_priv; struct sk_buff *skb; u32 elp_interval; - bool ret; bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work); hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v); @@ -292,6 +328,8 @@ static void batadv_v_elp_periodic_work(struct work_struct *work) atomic_inc(&hard_iface->bat_v.elp_seqno); + INIT_LIST_HEAD(&metric_queue); + /* The throughput metric is updated on each sent packet. This way, if a * node is dead and no longer sends packets, batman-adv is still able to * react timely to its death. @@ -316,16 +354,28 @@ static void batadv_v_elp_periodic_work(struct work_struct *work) /* Reading the estimated throughput from cfg80211 is a task that * may sleep and that is not allowed in an rcu protected - * context. Therefore schedule a task for that. + * context. Therefore add it to metric_queue and process it + * outside rcu protected context. 
*/ - ret = queue_work(batadv_event_workqueue, - &hardif_neigh->bat_v.metric_work); - - if (!ret) + metric_entry = kzalloc(sizeof(*metric_entry), GFP_ATOMIC); + if (!metric_entry) { batadv_hardif_neigh_put(hardif_neigh); + continue; + } + + metric_entry->hardif_neigh = hardif_neigh; + list_add(&metric_entry->list, &metric_queue); } rcu_read_unlock(); + list_for_each_entry_safe(metric_entry, metric_safe, &metric_queue, list) { + batadv_v_elp_throughput_metric_update(metric_entry->hardif_neigh); + + batadv_hardif_neigh_put(metric_entry->hardif_neigh); + list_del(&metric_entry->list); + kfree(metric_entry); + } + restart_timer: batadv_v_elp_start_timer(hard_iface); out: diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h index 9e2740195fa2..c9cb0a307100 100644 --- a/net/batman-adv/bat_v_elp.h +++ b/net/batman-adv/bat_v_elp.h @@ -10,7 +10,6 @@ #include "main.h" #include -#include int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface); void batadv_v_elp_iface_disable(struct batadv_hard_iface *hard_iface); @@ -19,6 +18,5 @@ void batadv_v_elp_iface_activate(struct batadv_hard_iface *primary_iface, void batadv_v_elp_primary_iface_set(struct batadv_hard_iface *primary_iface); int batadv_v_elp_packet_recv(struct sk_buff *skb, struct batadv_hard_iface *if_incoming); -void batadv_v_elp_throughput_metric_update(struct work_struct *work); #endif /* _NET_BATMAN_ADV_BAT_V_ELP_H_ */ diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 9f4815f4c8e8..deef817b28f0 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -840,8 +840,7 @@ batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, /* check if there is enough space for the optional TVLV */ next_buff_pos += ntohs(ogm2_packet->tvlv_len); - return (next_buff_pos <= packet_len) && - (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); + return next_buff_pos <= packet_len; } /** diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 4fc66cd95dc4..2b5453801bf0 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -990,16 +990,25 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) int tt_diff_len, tt_change_len = 0; int tt_diff_entries_num = 0; int tt_diff_entries_count = 0; + bool drop_changes = false; + size_t tt_extra_len = 0; u16 tvlv_len; tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes); tt_diff_len = batadv_tt_len(tt_diff_entries_num); /* if we have too many changes for one packet don't send any - * and wait for the tt table request which will be fragmented + * and wait for the tt table request so we can reply with the full + * (fragmented) table. + * + * The local change history should still be cleaned up so the next + * TT round can start again with a clean state. 
*/ - if (tt_diff_len > bat_priv->soft_iface->mtu) + if (tt_diff_len > bat_priv->soft_iface->mtu) { tt_diff_len = 0; + tt_diff_entries_num = 0; + drop_changes = true; + } tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data, &tt_change, &tt_diff_len); @@ -1008,7 +1017,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) tt_data->flags = BATADV_TT_OGM_DIFF; - if (tt_diff_len == 0) + if (!drop_changes && tt_diff_len == 0) goto container_register; spin_lock_bh(&bat_priv->tt.changes_list_lock); @@ -1027,6 +1036,9 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) } spin_unlock_bh(&bat_priv->tt.changes_list_lock); + tt_extra_len = batadv_tt_len(tt_diff_entries_num - + tt_diff_entries_count); + /* Keep the buffer for possible tt_request */ spin_lock_bh(&bat_priv->tt.last_changeset_lock); kfree(bat_priv->tt.last_changeset); @@ -1035,6 +1047,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) tt_change_len = batadv_tt_len(tt_diff_entries_count); /* check whether this new OGM has no changes due to size problems */ if (tt_diff_entries_count > 0) { + tt_diff_len -= tt_extra_len; /* if kmalloc() fails we will reply with the full table * instead of providing the diff */ @@ -1047,6 +1060,8 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv) } spin_unlock_bh(&bat_priv->tt.last_changeset_lock); + /* Remove extra packet space for OGM */ + tvlv_len -= tt_extra_len; container_register: batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data, tvlv_len); @@ -2747,14 +2762,16 @@ static bool batadv_tt_global_valid(const void *entry_ptr, * * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb * is not provided then this becomes a no-op. + * + * Return: Remaining unused length in tvlv_buff. */ -static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, - struct batadv_hashtable *hash, - void *tvlv_buff, u16 tt_len, - bool (*valid_cb)(const void *, - const void *, - u8 *flags), - void *cb_data) +static u16 batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, + struct batadv_hashtable *hash, + void *tvlv_buff, u16 tt_len, + bool (*valid_cb)(const void *, + const void *, + u8 *flags), + void *cb_data) { struct batadv_tt_common_entry *tt_common_entry; struct batadv_tvlv_tt_change *tt_change; @@ -2768,7 +2785,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, tt_change = tvlv_buff; if (!valid_cb) - return; + return tt_len; rcu_read_lock(); for (i = 0; i < hash->size; i++) { @@ -2794,6 +2811,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv, } } rcu_read_unlock(); + + return batadv_tt_len(tt_tot - tt_num_entries); } /** @@ -3069,10 +3088,11 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv, goto out; /* fill the rest of the tvlv with the real TT entries */ - batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash, - tt_change, tt_len, - batadv_tt_global_valid, - req_dst_orig_node); + tvlv_len -= batadv_tt_tvlv_generate(bat_priv, + bat_priv->tt.global_hash, + tt_change, tt_len, + batadv_tt_global_valid, + req_dst_orig_node); } /* Don't send the response, if larger than fragmented packet. 
*/ @@ -3196,9 +3216,11 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv, goto out; /* fill the rest of the tvlv with the real TT entries */ - batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash, - tt_change, tt_len, - batadv_tt_local_valid, NULL); + tvlv_len -= batadv_tt_tvlv_generate(bat_priv, + bat_priv->tt.local_hash, + tt_change, tt_len, + batadv_tt_local_valid, + NULL); } tvlv_tt_data->flags = BATADV_TT_RESPONSE; diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 76791815b26b..6f30afaabfdf 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -596,9 +596,6 @@ struct batadv_hardif_neigh_node_bat_v { * neighbor */ unsigned long last_unicast_tx; - - /** @metric_work: work queue callback item for metric update */ - struct work_struct metric_work; }; /** diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index 4eb1b3ced0d2..db119071a0ea 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -825,11 +825,16 @@ static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan, unsigned long hdr_len, unsigned long len, int nb) { + struct sk_buff *skb; + /* Note that we must allocate using GFP_ATOMIC here as * this function is called originally from netdev hard xmit * function in atomic context. */ - return bt_skb_alloc(hdr_len + len, GFP_ATOMIC); + skb = bt_skb_alloc(hdr_len + len, GFP_ATOMIC); + if (!skb) + return ERR_PTR(-ENOMEM); + return skb; } static void chan_suspend_cb(struct l2cap_chan *chan) diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index f93f3e7a3d90..3cd7c212375f 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -3829,25 +3829,27 @@ static void hci_tx_work(struct work_struct *work) /* ACL data packet */ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) { - struct hci_acl_hdr *hdr = (void *) skb->data; + struct hci_acl_hdr *hdr; struct hci_conn *conn; __u16 handle, flags; - skb_pull(skb, HCI_ACL_HDR_SIZE); + hdr = skb_pull_data(skb, sizeof(*hdr)); + if (!hdr) { + bt_dev_err(hdev, "ACL packet too small"); + goto drop; + } handle = __le16_to_cpu(hdr->handle); flags = hci_flags(handle); handle = hci_handle(handle); - BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, - handle, flags); + bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len, + handle, flags); hdev->stat.acl_rx++; hci_dev_lock(hdev); conn = hci_conn_hash_lookup_handle(hdev, handle); - if (conn && hci_dev_test_flag(hdev, HCI_MGMT)) - mgmt_device_connected(hdev, conn, NULL, 0); hci_dev_unlock(hdev); if (conn) { @@ -3861,6 +3863,7 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) handle); } +drop: kfree_skb(skb); } diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 7c1df481ebe9..8667723d4e96 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -3477,23 +3477,30 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, hci_update_scan(hdev); } - params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); - if (params) { - switch (params->auto_connect) { - case HCI_AUTO_CONN_LINK_LOSS: - if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) + /* Re-enable passive scanning if disconnected device is marked + * as auto-connectable. 
+ */ + if (conn->type == LE_LINK) { + params = hci_conn_params_lookup(hdev, &conn->dst, + conn->dst_type); + if (params) { + switch (params->auto_connect) { + case HCI_AUTO_CONN_LINK_LOSS: + if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) + break; + fallthrough; + + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + hci_pend_le_list_del_init(params); + hci_pend_le_list_add(params, + &hdev->pend_le_conns); + hci_update_passive_scan(hdev); break; - fallthrough; - case HCI_AUTO_CONN_DIRECT: - case HCI_AUTO_CONN_ALWAYS: - hci_pend_le_list_del_init(params); - hci_pend_le_list_add(params, &hdev->pend_le_conns); - hci_update_passive_scan(hdev); - break; - - default: - break; + default: + break; + } } } @@ -5648,150 +5655,6 @@ unlock: hci_dev_unlock(hdev); } -#if IS_ENABLED(CONFIG_BT_HS) -static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_ev_channel_selected *ev = data; - struct hci_conn *hcon; - - bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); - - hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); - if (!hcon) - return; - - amp_read_loc_assoc_final_data(hdev, hcon); -} - -static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_ev_phy_link_complete *ev = data; - struct hci_conn *hcon, *bredr_hcon; - - bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, - ev->status); - - hci_dev_lock(hdev); - - hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); - if (!hcon) - goto unlock; - - if (!hcon->amp_mgr) - goto unlock; - - if (ev->status) { - hci_conn_del(hcon); - goto unlock; - } - - bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; - - hcon->state = BT_CONNECTED; - bacpy(&hcon->dst, &bredr_hcon->dst); - - hci_conn_hold(hcon); - hcon->disc_timeout = HCI_DISCONN_TIMEOUT; - hci_conn_drop(hcon); - - hci_debugfs_create_conn(hcon); - hci_conn_add_sysfs(hcon); - - amp_physical_cfm(bredr_hcon, hcon); - -unlock: - hci_dev_unlock(hdev); -} - -static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_ev_logical_link_complete *ev = data; - struct hci_conn *hcon; - struct hci_chan *hchan; - struct amp_mgr *mgr; - - bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", - le16_to_cpu(ev->handle), ev->phy_handle, ev->status); - - hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); - if (!hcon) - return; - - /* Create AMP hchan */ - hchan = hci_chan_create(hcon); - if (!hchan) - return; - - hchan->handle = le16_to_cpu(ev->handle); - hchan->amp = true; - - BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); - - mgr = hcon->amp_mgr; - if (mgr && mgr->bredr_chan) { - struct l2cap_chan *bredr_chan = mgr->bredr_chan; - - l2cap_chan_lock(bredr_chan); - - bredr_chan->conn->mtu = hdev->block_mtu; - l2cap_logical_cfm(bredr_chan, hchan, 0); - hci_conn_hold(hcon); - - l2cap_chan_unlock(bredr_chan); - } -} - -static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, - struct sk_buff *skb) -{ - struct hci_ev_disconn_logical_link_complete *ev = data; - struct hci_chan *hchan; - - bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", - le16_to_cpu(ev->handle), ev->status); - - if (ev->status) - return; - - hci_dev_lock(hdev); - - hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); - if (!hchan || !hchan->amp) - goto unlock; - - amp_destroy_logical_link(hchan, ev->reason); - -unlock: - hci_dev_unlock(hdev); -} - -static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, 
void *data, - struct sk_buff *skb) -{ - struct hci_ev_disconn_phy_link_complete *ev = data; - struct hci_conn *hcon; - - bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); - - if (ev->status) - return; - - hci_dev_lock(hdev); - - hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); - if (hcon && hcon->type == AMP_LINK) { - hcon->state = BT_CLOSED; - hci_disconn_cfm(hcon, ev->reason); - hci_conn_del(hcon); - } - - hci_dev_unlock(hdev); -} -#endif - static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, u8 bdaddr_type, bdaddr_t *local_rpa) { @@ -6385,11 +6248,12 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, * event or send an immediate device found event if the data * should not be stored for later. */ - if (!ext_adv && !has_pending_adv_report(hdev)) { + if (!has_pending_adv_report(hdev)) { /* If the report will trigger a SCAN_REQ store it for * later merging. */ - if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + if (!ext_adv && (type == LE_ADV_IND || + type == LE_ADV_SCAN_IND)) { store_pending_adv_report(hdev, bdaddr, bdaddr_type, rssi, flags, data, len); return; @@ -7473,25 +7337,6 @@ static const struct hci_ev { /* [0x3e = HCI_EV_LE_META] */ HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt, sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE), -#if IS_ENABLED(CONFIG_BT_HS) - /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */ - HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt, - sizeof(struct hci_ev_phy_link_complete)), - /* [0x41 = HCI_EV_CHANNEL_SELECTED] */ - HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt, - sizeof(struct hci_ev_channel_selected)), - /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */ - HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE, - hci_disconn_loglink_complete_evt, - sizeof(struct hci_ev_disconn_logical_link_complete)), - /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */ - HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt, - sizeof(struct hci_ev_logical_link_complete)), - /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */ - HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE, - hci_disconn_phylink_complete_evt, - sizeof(struct hci_ev_disconn_phy_link_complete)), -#endif /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */ HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt, sizeof(struct hci_ev_num_comp_blocks)), diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 862ac5e1f4b4..c6108e68f5a9 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -650,6 +650,17 @@ void hci_cmd_sync_init(struct hci_dev *hdev) INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); } +static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev, + struct hci_cmd_sync_work_entry *entry, + int err) +{ + if (entry->destroy) + entry->destroy(hdev, entry->data, err); + + list_del(&entry->list); + kfree(entry); +} + void hci_cmd_sync_clear(struct hci_dev *hdev) { struct hci_cmd_sync_work_entry *entry, *tmp; @@ -658,13 +669,8 @@ void hci_cmd_sync_clear(struct hci_dev *hdev) cancel_work_sync(&hdev->reenable_adv_work); mutex_lock(&hdev->cmd_sync_work_lock); - list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { - if (entry->destroy) - entry->destroy(hdev, entry->data, -ECANCELED); - - list_del(&entry->list); - kfree(entry); - } + list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); mutex_unlock(&hdev->cmd_sync_work_lock); } @@ -756,6 +762,115 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, } 
EXPORT_SYMBOL(hci_cmd_sync_queue); +static struct hci_cmd_sync_work_entry * +_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { + if (func && entry->func != func) + continue; + + if (data && entry->data != data) + continue; + + if (destroy && entry->destroy != destroy) + continue; + + return entry; + } + + return NULL; +} + +/* Queue HCI command entry once: + * + * - Lookup if an entry already exists and only if it doesn't create a new entry + * and queue it. + */ +int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy)) + return 0; + + return hci_cmd_sync_queue(hdev, func, data, destroy); +} +EXPORT_SYMBOL(hci_cmd_sync_queue_once); + +/* Lookup HCI command entry: + * + * - Return first entry that matches by function callback or data or + * destroy callback. + */ +struct hci_cmd_sync_work_entry * +hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + + mutex_lock(&hdev->cmd_sync_work_lock); + entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy); + mutex_unlock(&hdev->cmd_sync_work_lock); + + return entry; +} +EXPORT_SYMBOL(hci_cmd_sync_lookup_entry); + +/* Cancel HCI command entry */ +void hci_cmd_sync_cancel_entry(struct hci_dev *hdev, + struct hci_cmd_sync_work_entry *entry) +{ + mutex_lock(&hdev->cmd_sync_work_lock); + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + mutex_unlock(&hdev->cmd_sync_work_lock); +} +EXPORT_SYMBOL(hci_cmd_sync_cancel_entry); + +/* Dequeue one HCI command entry: + * + * - Lookup and cancel first entry that matches. + */ +bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev, + hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + + entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy); + if (!entry) + return false; + + hci_cmd_sync_cancel_entry(hdev, entry); + + return true; +} +EXPORT_SYMBOL(hci_cmd_sync_dequeue_once); + +/* Dequeue HCI command entry: + * + * - Lookup and cancel any entry that matches by function callback or data or + * destroy callback. + */ +bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + bool ret = false; + + mutex_lock(&hdev->cmd_sync_work_lock); + while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data, + destroy))) { + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + ret = true; + } + mutex_unlock(&hdev->cmd_sync_work_lock); + + return ret; +} +EXPORT_SYMBOL(hci_cmd_sync_dequeue); + int hci_update_eir_sync(struct hci_dev *hdev) { struct hci_cp_write_eir cp; @@ -891,9 +1006,9 @@ static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) { - /* If we're advertising or initiating an LE connection we can't - * go ahead and change the random address at this time. This is - * because the eventual initiator address used for the + /* If a random_addr has been set and we're advertising or initiating an LE + * connection, we can't go ahead and change the random address at this + * time.
This is because the eventual initiator address used for the * subsequently created connection will be undefined (some * controllers use the new address and others the one we had * when the operation started). @@ -901,8 +1016,9 @@ static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) * In this kind of scenario skip the update and let the random * address be updated at the next cycle. */ - if (hci_dev_test_flag(hdev, HCI_LE_ADV) || - hci_lookup_le_connect(hdev)) { + if (bacmp(&hdev->random_addr, BDADDR_ANY) && + (hci_dev_test_flag(hdev, HCI_LE_ADV) || + hci_lookup_le_connect(hdev))) { bt_dev_dbg(hdev, "Deferring random address update"); hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); return 0; @@ -3023,7 +3139,8 @@ int hci_update_passive_scan(struct hci_dev *hdev) hci_dev_test_flag(hdev, HCI_UNREGISTER)) return 0; - return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL); + return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL, + NULL); } int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 633b82d54272..cc7d4a8ed8ce 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -19,16 +19,6 @@ static const struct device_type bt_link = { .release = bt_link_release, }; -/* - * The rfcomm tty device will possibly retain even when conn - * is down, and sysfs doesn't support move zombie device, - * so we should move the device before conn device is destroyed. - */ -static int __match_tty(struct device *dev, void *data) -{ - return !strncmp(dev_name(dev), "rfcomm", 6); -} - void hci_conn_init_sysfs(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; @@ -71,10 +61,13 @@ void hci_conn_del_sysfs(struct hci_conn *conn) return; } + /* If there are devices using the connection as parent reset it to NULL + * before unregistering the device. + */ while (1) { struct device *dev; - dev = device_find_child(&conn->dev, NULL, __match_tty); + dev = device_find_any_child(&conn->dev); if (!dev) break; device_move(dev, NULL, DPM_ORDER_DEV_LAST); diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 27efca5dc7bb..437cbeaa9619 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -964,7 +964,11 @@ static int iso_sock_accept(struct socket *sock, struct socket *newsock, long timeo; int err = 0; - lock_sock(sk); + /* Use explicit nested locking to avoid lockdep warnings generated + * because the parent socket and the child socket are locked on the + * same thread. 
+ */ + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); @@ -995,7 +999,7 @@ static int iso_sock_accept(struct socket *sock, struct socket *newsock, release_sock(sk); timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); - lock_sock(sk); + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); } remove_wait_queue(sk_sleep(sk), &wait); @@ -1189,7 +1193,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; - int len, err = 0; + int err = 0; struct bt_iso_qos qos; u32 opt; @@ -1204,10 +1208,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = bt_copy_from_sockptr(&opt, sizeof(opt), optval, optlen); + if (err) break; - } if (opt) set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); @@ -1222,18 +1225,9 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname, break; } - len = min_t(unsigned int, sizeof(qos), optlen); - if (len != sizeof(qos)) { - err = -EINVAL; + err = bt_copy_from_sockptr(&qos, sizeof(qos), optval, optlen); + if (err) break; - } - - memset(&qos, 0, sizeof(qos)); - - if (copy_from_sockptr(&qos, optval, len)) { - err = -EFAULT; - break; - } if (!check_qos(&qos)) { err = -EINVAL; @@ -1252,18 +1246,16 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname, } if (optlen > sizeof(iso_pi(sk)->base)) { - err = -EOVERFLOW; + err = -EINVAL; break; } - len = min_t(unsigned int, sizeof(iso_pi(sk)->base), optlen); - - if (copy_from_sockptr(iso_pi(sk)->base, optval, len)) { - err = -EFAULT; + err = bt_copy_from_sockptr(iso_pi(sk)->base, optlen, optval, + optlen); + if (err) break; - } - iso_pi(sk)->base_len = len; + iso_pi(sk)->base_len = optlen; break; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 187c91843876..cb9b1edfcea2 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -656,7 +656,8 @@ void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) hci_conn_hold(conn->hcon); - list_add(&chan->list, &conn->chan_l); + /* Append to the list since the order matters for ECRED */ + list_add_tail(&chan->list, &conn->chan_l); } void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) @@ -1560,7 +1561,8 @@ static void l2cap_request_info(struct l2cap_conn *conn) sizeof(req), &req); } -static bool l2cap_check_enc_key_size(struct hci_conn *hcon) +static bool l2cap_check_enc_key_size(struct hci_conn *hcon, + struct l2cap_chan *chan) { /* The minimum encryption key size needs to be enforced by the * host stack before establishing any L2CAP connections. 
The @@ -1574,7 +1576,7 @@ static bool l2cap_check_enc_key_size(struct hci_conn *hcon) int min_key_size = hcon->hdev->min_enc_key_size; /* On FIPS security level, key size must be 16 bytes */ - if (hcon->sec_level == BT_SECURITY_FIPS) + if (chan->sec_level == BT_SECURITY_FIPS) min_key_size = 16; return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || @@ -1602,7 +1604,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) !__l2cap_no_conn_pending(chan)) return; - if (l2cap_check_enc_key_size(conn->hcon)) + if (l2cap_check_enc_key_size(conn->hcon, chan)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -1684,7 +1686,7 @@ static void l2cap_conn_start(struct l2cap_conn *conn) continue; } - if (l2cap_check_enc_key_size(conn->hcon)) + if (l2cap_check_enc_key_size(conn->hcon, chan)) l2cap_start_connection(chan); else l2cap_chan_close(chan, ECONNREFUSED); @@ -3995,7 +3997,11 @@ static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data) { struct l2cap_ecred_rsp_data *rsp = data; - if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) + /* Check if channel for outgoing connection or if it wasn't deferred + * since in those cases it must be skipped. + */ + if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags) || + !test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) return; /* Reset ident so only one response is sent */ @@ -4181,7 +4187,8 @@ static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn, /* Check if the ACL is secure enough (if not SDP) */ if (psm != cpu_to_le16(L2CAP_PSM_SDP) && - !hci_conn_check_link_mode(conn->hcon)) { + (!hci_conn_check_link_mode(conn->hcon) || + !l2cap_check_enc_key_size(conn->hcon, pchan))) { conn->disc_reason = HCI_ERROR_AUTH_FAILURE; result = L2CAP_CR_SEC_BLOCK; goto response; @@ -8412,7 +8419,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) } if (chan->state == BT_CONNECT) { - if (!status && l2cap_check_enc_key_size(hcon)) + if (!status && l2cap_check_enc_key_size(hcon, chan)) l2cap_start_connection(chan); else __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); @@ -8422,7 +8429,7 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) struct l2cap_conn_rsp rsp; __u16 res, stat; - if (!status && l2cap_check_enc_key_size(hcon)) { + if (!status && l2cap_check_enc_key_size(hcon, chan)) { if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { res = L2CAP_CR_PEND; stat = L2CAP_CS_AUTHOR_PEND; diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index b17782dc513b..bdfc83eb7aef 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -728,12 +728,12 @@ static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu) { switch (chan->scid) { case L2CAP_CID_ATT: - if (mtu < L2CAP_LE_MIN_MTU) + if (mtu && mtu < L2CAP_LE_MIN_MTU) return false; break; default: - if (mtu < L2CAP_DEFAULT_MIN_MTU) + if (mtu && mtu < L2CAP_DEFAULT_MIN_MTU) return false; } @@ -1920,6 +1920,8 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, chan = l2cap_chan_create(); if (!chan) { sk_free(sk); + if (sock) + sock->sk = NULL; return NULL; } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 5a1015ccc063..27bd8c8ddeb0 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1329,7 +1329,8 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err) struct mgmt_mode *cp; /* Make sure cmd still outstanding. 
*/ - if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_SET_POWERED, hdev)) return; cp = cmd->param; @@ -1362,7 +1363,13 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err) static int set_powered_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; - struct mgmt_mode *cp = cmd->param; + struct mgmt_mode *cp; + + /* Make sure cmd still outstanding. */ + if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev)) + return -ECANCELED; + + cp = cmd->param; BT_DBG("%s", hdev->name); @@ -1457,10 +1464,15 @@ static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data) static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) { - if (cmd->cmd_complete) { - u8 *status = data; + struct cmd_lookup *match = data; - cmd->cmd_complete(cmd, *status); + /* dequeue cmd_sync entries using cmd as data as that is about to be + * removed/freed. + */ + hci_cmd_sync_dequeue(match->hdev, NULL, cmd, NULL); + + if (cmd->cmd_complete) { + cmd->cmd_complete(cmd, match->mgmt_status); mgmt_pending_remove(cmd); return; @@ -1509,7 +1521,8 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "err %d", err); /* Make sure cmd still outstanding. */ - if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev)) return; hci_dev_lock(hdev); @@ -1683,7 +1696,8 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data, bt_dev_dbg(hdev, "err %d", err); /* Make sure cmd still outstanding. */ - if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) return; hci_dev_lock(hdev); @@ -1916,7 +1930,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) bool changed; /* Make sure cmd still outstanding. 
*/ - if (cmd != pending_find(MGMT_OP_SET_SSP, hdev)) + if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev)) return; if (err) { @@ -3841,7 +3855,8 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err) bt_dev_dbg(hdev, "err %d", err); - if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev)) return; if (status) { @@ -4016,7 +4031,8 @@ static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err) struct sk_buff *skb = cmd->skb; u8 status = mgmt_status(err); - if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) return; if (!status) { @@ -5508,10 +5524,16 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, { struct mgmt_rp_remove_adv_monitor rp; struct mgmt_pending_cmd *cmd = data; - struct mgmt_cp_remove_adv_monitor *cp = cmd->param; + struct mgmt_cp_remove_adv_monitor *cp; + + if (status == -ECANCELED || + cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) + return; hci_dev_lock(hdev); + cp = cmd->param; + rp.monitor_handle = cp->monitor_handle; if (!status) @@ -5529,6 +5551,10 @@ static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; + + if (cmd != pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) + return -ECANCELED; + struct mgmt_cp_remove_adv_monitor *cp = cmd->param; u16 handle = __le16_to_cpu(cp->monitor_handle); @@ -5907,13 +5933,16 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; + bt_dev_dbg(hdev, "err %d", err); + + if (err == -ECANCELED) + return; + if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) && cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) && cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev)) return; - bt_dev_dbg(hdev, "err %d", err); - mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), cmd->param, 1); mgmt_pending_remove(cmd); @@ -6146,7 +6175,8 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err) { struct mgmt_pending_cmd *cmd = data; - if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev)) return; bt_dev_dbg(hdev, "err %d", err); @@ -8093,7 +8123,8 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data, u8 status = mgmt_status(err); u16 eir_len; - if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev)) + if (err == -ECANCELED || + cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev)) return; if (!status) { @@ -9424,14 +9455,14 @@ void mgmt_index_added(struct hci_dev *hdev) void mgmt_index_removed(struct hci_dev *hdev) { struct mgmt_ev_ext_index ev; - u8 status = MGMT_STATUS_INVALID_INDEX; + struct cmd_lookup match = { NULL, hdev, MGMT_STATUS_INVALID_INDEX }; if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return; switch (hdev->dev_type) { case HCI_PRIMARY: - mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); + mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match); if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, @@ -9489,7 +9520,7 @@ void mgmt_power_on(struct hci_dev *hdev, int err) void __mgmt_power_off(struct hci_dev *hdev) { struct cmd_lookup match = { NULL, hdev }; - u8 
status, zero_cod[] = { 0, 0, 0 }; + u8 zero_cod[] = { 0, 0, 0 }; mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); @@ -9501,11 +9532,11 @@ void __mgmt_power_off(struct hci_dev *hdev) * status responses. */ if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) - status = MGMT_STATUS_INVALID_INDEX; + match.mgmt_status = MGMT_STATUS_INVALID_INDEX; else - status = MGMT_STATUS_NOT_POWERED; + match.mgmt_status = MGMT_STATUS_NOT_POWERED; - mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); + mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &match); if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) { mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, @@ -9702,6 +9733,9 @@ void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) + eir_precalc_len(sizeof(conn->dev_class))); + if (!skb) + return; + ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, &conn->dst); ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type); @@ -10483,6 +10517,8 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0)); + if (!skb) + return; ev = skb_put(skb, sizeof(*ev)); bacpy(&ev->addr.bdaddr, bdaddr); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index cbff37b32734..1cf6543bdec5 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -274,13 +274,13 @@ static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, struct rfcomm_dlc *d; struct sock *sk; - sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern); - if (!sk) + d = rfcomm_dlc_alloc(prio); + if (!d) return NULL; - d = rfcomm_dlc_alloc(prio); - if (!d) { - sk_free(sk); + sk = bt_sock_alloc(net, sock, &rfcomm_proto, proto, prio, kern); + if (!sk) { + rfcomm_dlc_free(d); return NULL; } @@ -729,7 +729,8 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u struct sock *l2cap_sk; struct l2cap_conn *conn; struct rfcomm_conninfo cinfo; - int len, err = 0; + int err = 0; + size_t len; u32 opt; BT_DBG("sk %p", sk); @@ -783,7 +784,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u cinfo.hci_handle = conn->hcon->handle; memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); - len = min_t(unsigned int, len, sizeof(cinfo)); + len = min(len, sizeof(cinfo)); if (copy_to_user(optval, (char *) &cinfo, len)) err = -EFAULT; @@ -802,7 +803,8 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c { struct sock *sk = sock->sk; struct bt_security sec; - int len, err = 0; + int err = 0; + size_t len; BT_DBG("sk %p", sk); @@ -827,7 +829,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c sec.level = rfcomm_pi(sk)->sec_level; sec.key_size = 0; - len = min_t(unsigned int, len, sizeof(sec)); + len = min(len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) err = -EFAULT; diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index ad5afde17213..fe8728041ad0 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -268,10 +268,13 @@ static int sco_connect(struct sock *sk) else type = SCO_LINK; - if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT && - (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) { - err = -EOPNOTSUPP; - goto unlock; + switch (sco_pi(sk)->setting & SCO_AIRMODE_MASK) { + case SCO_AIRMODE_TRANSP: + if (!lmp_transp_capable(hdev) || 
!lmp_esco_capable(hdev)) { + err = -EOPNOTSUPP; + goto unlock; + } + break; } hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, @@ -888,13 +891,6 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, if (err) break; - /* Explicitly check for these values */ - if (voice.setting != BT_VOICE_TRANSPARENT && - voice.setting != BT_VOICE_CVSD_16BIT) { - err = -EINVAL; - break; - } - sco_pi(sk)->setting = voice.setting; hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); @@ -902,9 +898,14 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, err = -EBADFD; break; } - if (enhanced_sync_conn_capable(hdev) && - voice.setting == BT_VOICE_TRANSPARENT) - sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT; + + switch (sco_pi(sk)->setting & SCO_AIRMODE_MASK) { + case SCO_AIRMODE_TRANSP: + if (enhanced_sync_conn_capable(hdev)) + sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT; + break; + } + hci_dev_put(hdev); break; diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 64be562f0fe3..77b386b76d46 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -768,12 +768,9 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size, void __user *data_in = u64_to_user_ptr(kattr->test.data_in); void *data; - if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom) + if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom) return ERR_PTR(-EINVAL); - if (user_size > size) - return ERR_PTR(-EMSGSIZE); - size = SKB_DATA_ALIGN(size); data = kzalloc(size + headroom + tailroom, GFP_USER); if (!data) diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 5c6ed1d49b92..b4d661fe7886 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -366,9 +366,9 @@ br_nf_ipv4_daddr_was_changed(const struct sk_buff *skb, */ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct net_device *dev = skb->dev, *br_indev; - struct iphdr *iph = ip_hdr(skb); struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); + struct net_device *dev = skb->dev, *br_indev; + const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; int err; @@ -386,7 +386,9 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_ } nf_bridge->in_prerouting = 0; if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) { - if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { + err = ip_route_input(skb, iph->daddr, iph->saddr, + ip4h_dscp(iph), dev); + if (err) { struct in_device *in_dev = __in_dev_get_rcu(dev); /* If err equals -EHOSTUNREACH the error is due to a diff --git a/net/bridge/br_nf_core.c b/net/bridge/br_nf_core.c index 8c69f0c95a8e..b8c8deb87407 100644 --- a/net/bridge/br_nf_core.c +++ b/net/bridge/br_nf_core.c @@ -65,17 +65,14 @@ static struct dst_ops fake_dst_ops = { * ipt_REJECT needs it. Future netfilter modules might * require us to fill additional fields. 
*/
-static const u32 br_dst_default_metrics[RTAX_MAX] = {
- [RTAX_MTU - 1] = 1500,
-};
-
 void br_netfilter_rtable_init(struct net_bridge *br)
 {
 struct rtable *rt = &br->fake_rtable;
 atomic_set(&rt->dst.__refcnt, 1);
 rt->dst.dev = br->dev;
- dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
+ dst_init_metrics(&rt->dst, br->metrics, false);
+ dst_metric_set(&rt->dst, RTAX_MTU, br->dev->mtu);
 rt->dst.flags = DST_NOXFRM | DST_FAKE_RTABLE;
 rt->dst.ops = &fake_dst_ops;
 }
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 940de9516768..19fb50549252 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -478,6 +478,7 @@ struct net_bridge {
 struct rtable fake_rtable;
 struct rt6_info fake_rt6_info;
 };
+ u32 metrics[RTAX_MAX];
 #endif
 u16 group_fwd_mask;
 u16 group_fwd_mask_required;
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 9ffd40b8270c..23a82567fe62 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -715,8 +715,8 @@ static int br_vlan_add_existing(struct net_bridge *br,
 u16 flags, bool *changed,
 struct netlink_ext_ack *extack)
 {
- bool would_change = __vlan_flags_would_change(vlan, flags);
 bool becomes_brentry = false;
+ bool would_change = false;
 int err;
 if (!br_vlan_is_brentry(vlan)) {
@@ -725,6 +725,8 @@ static int br_vlan_add_existing(struct net_bridge *br,
 return -EINVAL;
 becomes_brentry = true;
+ } else {
+ would_change = __vlan_flags_would_change(vlan, flags);
 }
 /* Master VLANs that aren't brentries weren't notified before,
diff --git a/net/can/af_can.c b/net/can/af_can.c
index c69168f11e44..bbd8e959137d 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -171,6 +171,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
 /* release sk on errors */
 sock_orphan(sk);
 sock_put(sk);
+ sock->sk = NULL;
 }
 errout:
@@ -286,8 +287,8 @@ int can_send(struct sk_buff *skb, int loop)
 netif_rx(newskb);
 /* update statistics */
- pkg_stats->tx_frames++;
- pkg_stats->tx_frames_delta++;
+ atomic_long_inc(&pkg_stats->tx_frames);
+ atomic_long_inc(&pkg_stats->tx_frames_delta);
 return 0;
@@ -647,8 +648,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
 int matches;
 /* update statistics */
- pkg_stats->rx_frames++;
- pkg_stats->rx_frames_delta++;
+ atomic_long_inc(&pkg_stats->rx_frames);
+ atomic_long_inc(&pkg_stats->rx_frames_delta);
 /* create non-zero unique skb identifier together with *skb */
 while (!(can_skb_prv(skb)->skbcnt))
@@ -669,8 +670,8 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
 consume_skb(skb);
 if (matches > 0) {
- pkg_stats->matches++;
- pkg_stats->matches_delta++;
+ atomic_long_inc(&pkg_stats->matches);
+ atomic_long_inc(&pkg_stats->matches_delta);
 }
 }
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 7c2d9161e224..22f3352c77fe 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -66,9 +66,9 @@ struct receiver {
 struct can_pkg_stats {
 unsigned long jiffies_init;
- unsigned long rx_frames;
- unsigned long tx_frames;
- unsigned long matches;
+ atomic_long_t rx_frames;
+ atomic_long_t tx_frames;
+ atomic_long_t matches;
 unsigned long total_rx_rate;
 unsigned long total_tx_rate;
@@ -82,9 +82,9 @@ struct can_pkg_stats {
 unsigned long max_tx_rate;
 unsigned long max_rx_match_ratio;
- unsigned long rx_frames_delta;
- unsigned long tx_frames_delta;
- unsigned long matches_delta;
+ atomic_long_t rx_frames_delta;
+ atomic_long_t tx_frames_delta;
+ atomic_long_t matches_delta;
 };
 /* persistent statistics */
diff --git a/net/can/bcm.c b/net/can/bcm.c
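The af_can.c and af_can.h hunks above convert the CAN packet statistics from plain unsigned long counters to atomic_long_t, so concurrent RX/TX paths can bump them without a lock and the one-second timer in can_stat_update() only needs atomic reads. The following is a minimal userspace sketch of that pattern using C11 <stdatomic.h>; every identifier in it (pkg_stats, rx_path, ...) is invented for illustration, and it builds with cc -std=c11 -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* stand-in for struct can_pkg_stats: only the lock-free counters */
struct pkg_stats {
        atomic_long rx_frames;
        atomic_long rx_frames_delta;
};

static struct pkg_stats stats;

static void *rx_path(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                /* like atomic_long_inc() in can_receive() */
                atomic_fetch_add(&stats.rx_frames, 1);
                atomic_fetch_add(&stats.rx_frames_delta, 1);
        }
        return NULL;
}

int main(void)
{
        pthread_t t[4];

        for (int i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, rx_path, NULL);
        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);

        /* like the atomic_long_read() snapshot taken in can_stat_update() */
        printf("rx_frames=%ld\n", atomic_load(&stats.rx_frames));
        /* like clearing the delta counters at the end of a stats period */
        atomic_store(&stats.rx_frames_delta, 0);
        return 0;
}

The aggregate rate and ratio fields stay unsigned long in the patch because they are only ever written from the single timer callback.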
index 3817692e83af..4fb5cfaf74f3 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -58,6 +58,7 @@ #include #include #include +#include #include #include @@ -120,6 +121,7 @@ struct bcm_op { struct canfd_frame last_sframe; struct sock *sk; struct net_device *rx_reg_dev; + spinlock_t bcm_tx_lock; /* protect currframe/count in runtime updates */ }; struct bcm_sock { @@ -205,7 +207,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) seq_printf(m, " / bound %s", bcm_proc_getifname(net, ifname, bo->ifindex)); seq_printf(m, " <<<\n"); - list_for_each_entry(op, &bo->rx_ops, list) { + rcu_read_lock(); + + list_for_each_entry_rcu(op, &bo->rx_ops, list) { unsigned long reduction; @@ -261,6 +265,9 @@ static int bcm_proc_show(struct seq_file *m, void *v) seq_printf(m, "# sent %ld\n", op->frames_abs); } seq_putc(m, '\n'); + + rcu_read_unlock(); + return 0; } #endif /* CONFIG_PROC_FS */ @@ -273,13 +280,18 @@ static void bcm_can_tx(struct bcm_op *op) { struct sk_buff *skb; struct net_device *dev; - struct canfd_frame *cf = op->frames + op->cfsiz * op->currframe; + struct canfd_frame *cf; int err; /* no target device? => exit */ if (!op->ifindex) return; + /* read currframe under lock protection */ + spin_lock_bh(&op->bcm_tx_lock); + cf = op->frames + op->cfsiz * op->currframe; + spin_unlock_bh(&op->bcm_tx_lock); + dev = dev_get_by_index(sock_net(op->sk), op->ifindex); if (!dev) { /* RFC: should this bcm_op remove itself here? */ @@ -300,6 +312,10 @@ static void bcm_can_tx(struct bcm_op *op) skb->dev = dev; can_skb_set_owner(skb, op->sk); err = can_send(skb, 1); + + /* update currframe and count under lock protection */ + spin_lock_bh(&op->bcm_tx_lock); + if (!err) op->frames_abs++; @@ -308,6 +324,11 @@ static void bcm_can_tx(struct bcm_op *op) /* reached last frame? 
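The bcm_tx_lock introduced above serializes updates of the current frame index and the remaining transmit count between bcm_can_tx() and a concurrent TX setup that may resize or reset the frame sequence. Below is a reduced userspace sketch of that locking pattern with a pthread mutex; only the pattern mirrors the patch, all identifiers are made up, and it builds with cc -std=c11 -pthread.

#include <pthread.h>
#include <stdio.h>

struct tx_op {
        pthread_mutex_t lock;           /* stands in for op->bcm_tx_lock */
        unsigned int currframe;
        unsigned int nframes;
        unsigned long count;
};

static void tx_one(struct tx_op *op)
{
        unsigned int frame;

        pthread_mutex_lock(&op->lock);
        frame = op->currframe;          /* snapshot under the lock */
        pthread_mutex_unlock(&op->lock);

        printf("sending frame %u\n", frame);    /* "send" outside the lock */

        pthread_mutex_lock(&op->lock);
        if (++op->currframe >= op->nframes)
                op->currframe = 0;
        if (op->count > 0)
                op->count--;
        pthread_mutex_unlock(&op->lock);
}

static void reconfigure(struct tx_op *op, unsigned int nframes, unsigned long count)
{
        pthread_mutex_lock(&op->lock);
        op->nframes = nframes;
        op->currframe = 0;              /* restart multi-frame transmission */
        op->count = count;
        pthread_mutex_unlock(&op->lock);
}

int main(void)
{
        struct tx_op op = { .nframes = 3, .count = 5 };

        pthread_mutex_init(&op.lock, NULL);
        tx_one(&op);
        reconfigure(&op, 2, 10);
        tx_one(&op);
        return 0;
}

In the patch the actual can_send() call stays outside the lock, which is why bcm_can_tx() only snapshots currframe under bcm_tx_lock and re-takes the lock afterwards to advance the counters.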
*/ if (op->currframe >= op->nframes) op->currframe = 0; + + if (op->count > 0) + op->count--; + + spin_unlock_bh(&op->bcm_tx_lock); out: dev_put(dev); } @@ -404,7 +425,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) struct bcm_msg_head msg_head; if (op->kt_ival1 && (op->count > 0)) { - op->count--; + bcm_can_tx(op); if (!op->count && (op->flags & TX_COUNTEVT)) { /* create notification to user */ @@ -419,7 +440,6 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) bcm_send_to_user(op, &msg_head, NULL, 0); } - bcm_can_tx(op); } else if (op->kt_ival2) { bcm_can_tx(op); @@ -801,7 +821,7 @@ static int bcm_delete_rx_op(struct list_head *ops, struct bcm_msg_head *mh, REGMASK(op->can_id), bcm_rx_handler, op); - list_del(&op->list); + list_del_rcu(&op->list); bcm_remove_op(op); return 1; /* done */ } @@ -821,7 +841,7 @@ static int bcm_delete_tx_op(struct list_head *ops, struct bcm_msg_head *mh, list_for_each_entry_safe(op, n, ops, list) { if ((op->can_id == mh->can_id) && (op->ifindex == ifindex) && (op->flags & CAN_FD_FRAME) == (mh->flags & CAN_FD_FRAME)) { - list_del(&op->list); + list_del_rcu(&op->list); bcm_remove_op(op); return 1; /* done */ } @@ -914,6 +934,27 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, } op->flags = msg_head->flags; + /* only lock for unlikely count/nframes/currframe changes */ + if (op->nframes != msg_head->nframes || + op->flags & TX_RESET_MULTI_IDX || + op->flags & SETTIMER) { + + spin_lock_bh(&op->bcm_tx_lock); + + if (op->nframes != msg_head->nframes || + op->flags & TX_RESET_MULTI_IDX) { + /* potentially update changed nframes */ + op->nframes = msg_head->nframes; + /* restart multiple frame transmission */ + op->currframe = 0; + } + + if (op->flags & SETTIMER) + op->count = msg_head->count; + + spin_unlock_bh(&op->bcm_tx_lock); + } + } else { /* insert new BCM operation for the given can_id */ @@ -921,9 +962,14 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, if (!op) return -ENOMEM; + spin_lock_init(&op->bcm_tx_lock); op->can_id = msg_head->can_id; op->cfsiz = CFSIZ(msg_head->flags); op->flags = msg_head->flags; + op->nframes = msg_head->nframes; + + if (op->flags & SETTIMER) + op->count = msg_head->count; /* create array for CAN frames and copy the data */ if (msg_head->nframes > 1) { @@ -982,22 +1028,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */ - if (op->nframes != msg_head->nframes) { - op->nframes = msg_head->nframes; - /* start multiple frame transmission with index 0 */ - op->currframe = 0; - } - - /* check flags */ - - if (op->flags & TX_RESET_MULTI_IDX) { - /* start multiple frame transmission with index 0 */ - op->currframe = 0; - } - if (op->flags & SETTIMER) { /* set timer values */ - op->count = msg_head->count; op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; op->kt_ival1 = bcm_timeval_to_ktime(msg_head->ival1); @@ -1014,11 +1046,8 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, op->flags |= TX_ANNOUNCE; } - if (op->flags & TX_ANNOUNCE) { + if (op->flags & TX_ANNOUNCE) bcm_can_tx(op); - if (op->count) - op->count--; - } if (op->flags & STARTTIMER) bcm_tx_start_timer(op); @@ -1234,7 +1263,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, bcm_rx_handler, op, "bcm", sk); if (err) { /* this bcm rx op is broken -> remove it */ - list_del(&op->list); + 
list_del_rcu(&op->list); bcm_remove_op(op); return err; } diff --git a/net/can/gw.c b/net/can/gw.c index 23a3d89cad81..5933423663cd 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -130,7 +130,7 @@ struct cgw_job { u32 handled_frames; u32 dropped_frames; u32 deleted_frames; - struct cf_mod mod; + struct cf_mod __rcu *cf_mod; union { /* CAN frame data source */ struct net_device *dev; @@ -459,6 +459,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) struct cgw_job *gwj = (struct cgw_job *)data; struct canfd_frame *cf; struct sk_buff *nskb; + struct cf_mod *mod; int modidx = 0; /* process strictly Classic CAN or CAN FD frames */ @@ -506,7 +507,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) * When there is at least one modification function activated, * we need to copy the skb as we want to modify skb->data. */ - if (gwj->mod.modfunc[0]) + mod = rcu_dereference(gwj->cf_mod); + if (mod->modfunc[0]) nskb = skb_copy(skb, GFP_ATOMIC); else nskb = skb_clone(skb, GFP_ATOMIC); @@ -529,8 +531,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) cf = (struct canfd_frame *)nskb->data; /* perform preprocessed modification functions if there are any */ - while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) - (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); + while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx]) + (*mod->modfunc[modidx++])(cf, mod); /* Has the CAN frame been modified? */ if (modidx) { @@ -546,11 +548,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) } /* check for checksum updates */ - if (gwj->mod.csumfunc.crc8) - (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); + if (mod->csumfunc.crc8) + (*mod->csumfunc.crc8)(cf, &mod->csum.crc8); - if (gwj->mod.csumfunc.xor) - (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); + if (mod->csumfunc.xor) + (*mod->csumfunc.xor)(cf, &mod->csum.xor); } /* clear the skb timestamp if not configured the other way */ @@ -581,9 +583,20 @@ static void cgw_job_free_rcu(struct rcu_head *rcu_head) { struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu); + /* cgw_job::cf_mod is always accessed from the same cgw_job object within + * the same RCU read section. Once cgw_job is scheduled for removal, + * cf_mod can also be removed without mandating an additional grace period. 
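The can/gw.c hunks around this point turn the per-job modification description (cf_mod) into an RCU-managed pointer: the RX hot path dereferences it once per frame under its RCU read section, and cgw_create_job() publishes a freshly allocated copy with rcu_assign_pointer() instead of patching the old one in place, freeing the previous copy only after a grace period. A much simplified userspace analogue using C11 acquire/release atomics is sketched below; it deliberately leaves out the deferred reclamation that kfree_rcu() provides, and every identifier is invented.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mod_desc {
        unsigned int uid;
        unsigned char xor_mask;
};

static _Atomic(struct mod_desc *) active_mod;

/* reader side: roughly rcu_dereference() + use within one read section */
static unsigned char apply_mod(unsigned char byte)
{
        struct mod_desc *m = atomic_load_explicit(&active_mod, memory_order_acquire);

        return m ? (unsigned char)(byte ^ m->xor_mask) : byte;
}

/* updater side: roughly rcu_assign_pointer(); the kernel would hand the old
 * descriptor to kfree_rcu() instead of returning it for an immediate free. */
static struct mod_desc *publish_mod(unsigned int uid, unsigned char mask)
{
        struct mod_desc *m = malloc(sizeof(*m));

        if (!m)
                return NULL;
        m->uid = uid;
        m->xor_mask = mask;
        return atomic_exchange_explicit(&active_mod, m, memory_order_release);
}

int main(void)
{
        struct mod_desc *old;

        old = publish_mod(1, 0x0f);
        printf("0x41 -> 0x%02x\n", (unsigned)apply_mod(0x41));
        free(old);                      /* NULL on the first publish */
        free(atomic_load(&active_mod));
        return 0;
}

In the kernel the old descriptor cannot be freed on the spot while readers may still hold it, which is why the update path uses kfree_rcu_mightsleep() and the release path frees cf_mod from cgw_job_free_rcu().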
+ */ + kfree(rcu_access_pointer(gwj->cf_mod)); kmem_cache_free(cgw_cache, gwj); } +/* Return cgw_job::cf_mod with RTNL protected section */ +static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj) +{ + return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked()); +} + static int cgw_notifier(struct notifier_block *nb, unsigned long msg, void *ptr) { @@ -616,6 +629,7 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, { struct rtcanmsg *rtcan; struct nlmsghdr *nlh; + struct cf_mod *mod; nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags); if (!nlh) @@ -650,82 +664,83 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, goto cancel; } + mod = cgw_job_cf_mod(gwj); if (gwj->flags & CGW_FLAGS_CAN_FD) { struct cgw_fdframe_mod mb; - if (gwj->mod.modtype.and) { - memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.and; + if (mod->modtype.and) { + memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); + mb.modtype = mod->modtype.and; if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.or) { - memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.or; + if (mod->modtype.or) { + memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); + mb.modtype = mod->modtype.or; if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.xor) { - memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.xor; + if (mod->modtype.xor) { + memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); + mb.modtype = mod->modtype.xor; if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.set) { - memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.set; + if (mod->modtype.set) { + memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); + mb.modtype = mod->modtype.set; if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0) goto cancel; } } else { struct cgw_frame_mod mb; - if (gwj->mod.modtype.and) { - memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.and; + if (mod->modtype.and) { + memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); + mb.modtype = mod->modtype.and; if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.or) { - memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.or; + if (mod->modtype.or) { + memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); + mb.modtype = mod->modtype.or; if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.xor) { - memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.xor; + if (mod->modtype.xor) { + memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); + mb.modtype = mod->modtype.xor; if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) goto cancel; } - if (gwj->mod.modtype.set) { - memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.set; + if (mod->modtype.set) { + memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); + mb.modtype = mod->modtype.set; if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) goto cancel; } } - if (gwj->mod.uid) { - if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0) + if (mod->uid) { + if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0) goto cancel; } - if (gwj->mod.csumfunc.crc8) { + if (mod->csumfunc.crc8) { if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN, - &gwj->mod.csum.crc8) < 0) + 
&mod->csum.crc8) < 0) goto cancel; } - if (gwj->mod.csumfunc.xor) { + if (mod->csumfunc.xor) { if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN, - &gwj->mod.csum.xor) < 0) + &mod->csum.xor) < 0) goto cancel; } @@ -1059,7 +1074,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, struct net *net = sock_net(skb->sk); struct rtcanmsg *r; struct cgw_job *gwj; - struct cf_mod mod; + struct cf_mod *mod; struct can_can_gw ccgw; u8 limhops = 0; int err = 0; @@ -1078,37 +1093,48 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, if (r->gwtype != CGW_TYPE_CAN_CAN) return -EINVAL; - err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); - if (err < 0) - return err; + mod = kmalloc(sizeof(*mod), GFP_KERNEL); + if (!mod) + return -ENOMEM; - if (mod.uid) { + err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); + if (err < 0) + goto out_free_cf; + + if (mod->uid) { ASSERT_RTNL(); /* check for updating an existing job with identical uid */ hlist_for_each_entry(gwj, &net->can.cgw_list, list) { - if (gwj->mod.uid != mod.uid) + struct cf_mod *old_cf; + + old_cf = cgw_job_cf_mod(gwj); + if (old_cf->uid != mod->uid) continue; /* interfaces & filters must be identical */ - if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) - return -EINVAL; + if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) { + err = -EINVAL; + goto out_free_cf; + } - /* update modifications with disabled softirq & quit */ - local_bh_disable(); - memcpy(&gwj->mod, &mod, sizeof(mod)); - local_bh_enable(); + rcu_assign_pointer(gwj->cf_mod, mod); + kfree_rcu_mightsleep(old_cf); return 0; } } /* ifindex == 0 is not allowed for job creation */ - if (!ccgw.src_idx || !ccgw.dst_idx) - return -ENODEV; + if (!ccgw.src_idx || !ccgw.dst_idx) { + err = -ENODEV; + goto out_free_cf; + } gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL); - if (!gwj) - return -ENOMEM; + if (!gwj) { + err = -ENOMEM; + goto out_free_cf; + } gwj->handled_frames = 0; gwj->dropped_frames = 0; @@ -1118,7 +1144,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, gwj->limit_hops = limhops; /* insert already parsed information */ - memcpy(&gwj->mod, &mod, sizeof(mod)); + RCU_INIT_POINTER(gwj->cf_mod, mod); memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw)); err = -ENODEV; @@ -1145,9 +1171,11 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, if (!err) hlist_add_head_rcu(&gwj->list, &net->can.cgw_list); out: - if (err) + if (err) { kmem_cache_free(cgw_cache, gwj); - +out_free_cf: + kfree(mod); + } return err; } @@ -1207,19 +1235,22 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, /* remove only the first matching entry */ hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { + struct cf_mod *cf_mod; + if (gwj->flags != r->flags) continue; if (gwj->limit_hops != limhops) continue; + cf_mod = cgw_job_cf_mod(gwj); /* we have a match when uid is enabled and identical */ - if (gwj->mod.uid || mod.uid) { - if (gwj->mod.uid != mod.uid) + if (cf_mod->uid || mod.uid) { + if (cf_mod->uid != mod.uid) continue; } else { /* no uid => check for identical modifications */ - if (memcmp(&gwj->mod, &mod, sizeof(mod))) + if (memcmp(cf_mod, &mod, sizeof(mod))) continue; } diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index 58909b36561a..0a4267a24263 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -1132,7 +1132,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk, todo_size = size; - while (todo_size) { + do { struct j1939_sk_buff_cb 
*skcb; segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE, @@ -1177,7 +1177,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk, todo_size -= segment_size; session->total_queued_size += segment_size; - } + } while (todo_size); switch (ret) { case 0: /* OK */ diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c index 5d2097e5ca3a..76d625c668e0 100644 --- a/net/can/j1939/transport.c +++ b/net/can/j1939/transport.c @@ -382,8 +382,9 @@ sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session, skb_queue_walk(&session->skb_queue, do_skb) { do_skcb = j1939_skb_to_cb(do_skb); - if (offset_start >= do_skcb->offset && - offset_start < (do_skcb->offset + do_skb->len)) { + if ((offset_start >= do_skcb->offset && + offset_start < (do_skcb->offset + do_skb->len)) || + (offset_start == 0 && do_skcb->offset == 0 && do_skb->len == 0)) { skb = do_skb; } } @@ -1505,7 +1506,7 @@ static struct j1939_session *j1939_session_new(struct j1939_priv *priv, session->state = J1939_SESSION_NEW; skb_queue_head_init(&session->skb_queue); - skb_queue_tail(&session->skb_queue, skb); + skb_queue_tail(&session->skb_queue, skb_get(skb)); skcb = j1939_skb_to_cb(skb); memcpy(&session->skcb, skcb, sizeof(session->skcb)); diff --git a/net/can/proc.c b/net/can/proc.c index bbce97825f13..25fdf060e30d 100644 --- a/net/can/proc.c +++ b/net/can/proc.c @@ -118,6 +118,13 @@ void can_stat_update(struct timer_list *t) struct can_pkg_stats *pkg_stats = net->can.pkg_stats; unsigned long j = jiffies; /* snapshot */ + long rx_frames = atomic_long_read(&pkg_stats->rx_frames); + long tx_frames = atomic_long_read(&pkg_stats->tx_frames); + long matches = atomic_long_read(&pkg_stats->matches); + long rx_frames_delta = atomic_long_read(&pkg_stats->rx_frames_delta); + long tx_frames_delta = atomic_long_read(&pkg_stats->tx_frames_delta); + long matches_delta = atomic_long_read(&pkg_stats->matches_delta); + /* restart counting in timer context on user request */ if (user_reset) can_init_stats(net); @@ -127,35 +134,33 @@ void can_stat_update(struct timer_list *t) can_init_stats(net); /* prevent overflow in calc_rate() */ - if (pkg_stats->rx_frames > (ULONG_MAX / HZ)) + if (rx_frames > (LONG_MAX / HZ)) can_init_stats(net); /* prevent overflow in calc_rate() */ - if (pkg_stats->tx_frames > (ULONG_MAX / HZ)) + if (tx_frames > (LONG_MAX / HZ)) can_init_stats(net); /* matches overflow - very improbable */ - if (pkg_stats->matches > (ULONG_MAX / 100)) + if (matches > (LONG_MAX / 100)) can_init_stats(net); /* calc total values */ - if (pkg_stats->rx_frames) - pkg_stats->total_rx_match_ratio = (pkg_stats->matches * 100) / - pkg_stats->rx_frames; + if (rx_frames) + pkg_stats->total_rx_match_ratio = (matches * 100) / rx_frames; pkg_stats->total_tx_rate = calc_rate(pkg_stats->jiffies_init, j, - pkg_stats->tx_frames); + tx_frames); pkg_stats->total_rx_rate = calc_rate(pkg_stats->jiffies_init, j, - pkg_stats->rx_frames); + rx_frames); /* calc current values */ - if (pkg_stats->rx_frames_delta) + if (rx_frames_delta) pkg_stats->current_rx_match_ratio = - (pkg_stats->matches_delta * 100) / - pkg_stats->rx_frames_delta; + (matches_delta * 100) / rx_frames_delta; - pkg_stats->current_tx_rate = calc_rate(0, HZ, pkg_stats->tx_frames_delta); - pkg_stats->current_rx_rate = calc_rate(0, HZ, pkg_stats->rx_frames_delta); + pkg_stats->current_tx_rate = calc_rate(0, HZ, tx_frames_delta); + pkg_stats->current_rx_rate = calc_rate(0, HZ, rx_frames_delta); /* check / update maximum values */ if (pkg_stats->max_tx_rate < 
pkg_stats->current_tx_rate) @@ -168,9 +173,9 @@ void can_stat_update(struct timer_list *t) pkg_stats->max_rx_match_ratio = pkg_stats->current_rx_match_ratio; /* clear values for 'current rate' calculation */ - pkg_stats->tx_frames_delta = 0; - pkg_stats->rx_frames_delta = 0; - pkg_stats->matches_delta = 0; + atomic_long_set(&pkg_stats->tx_frames_delta, 0); + atomic_long_set(&pkg_stats->rx_frames_delta, 0); + atomic_long_set(&pkg_stats->matches_delta, 0); /* restart timer (one second) */ mod_timer(&net->can.stattimer, round_jiffies(jiffies + HZ)); @@ -214,9 +219,12 @@ static int can_stats_proc_show(struct seq_file *m, void *v) struct can_rcv_lists_stats *rcv_lists_stats = net->can.rcv_lists_stats; seq_putc(m, '\n'); - seq_printf(m, " %8ld transmitted frames (TXF)\n", pkg_stats->tx_frames); - seq_printf(m, " %8ld received frames (RXF)\n", pkg_stats->rx_frames); - seq_printf(m, " %8ld matched frames (RXMF)\n", pkg_stats->matches); + seq_printf(m, " %8ld transmitted frames (TXF)\n", + atomic_long_read(&pkg_stats->tx_frames)); + seq_printf(m, " %8ld received frames (RXF)\n", + atomic_long_read(&pkg_stats->rx_frames)); + seq_printf(m, " %8ld matched frames (RXMF)\n", + atomic_long_read(&pkg_stats->matches)); seq_putc(m, '\n'); diff --git a/net/core/dev.c b/net/core/dev.c index 42c16b3e86b9..212a909b4840 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -921,6 +921,12 @@ out: return ret; } +static bool dev_addr_cmp(struct net_device *dev, unsigned short type, + const char *ha) +{ + return dev->type == type && !memcmp(dev->dev_addr, ha, dev->addr_len); +} + /** * dev_getbyhwaddr_rcu - find a device by its hardware address * @net: the applicable net namespace @@ -929,7 +935,7 @@ out: * * Search for an interface by MAC address. Returns NULL if the device * is not found or a pointer to the device. - * The caller must hold RCU or RTNL. + * The caller must hold RCU. * The returned device has not had its ref count increased * and the caller must therefore be careful about locking * @@ -941,14 +947,39 @@ struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, struct net_device *dev; for_each_netdev_rcu(net, dev) - if (dev->type == type && - !memcmp(dev->dev_addr, ha, dev->addr_len)) + if (dev_addr_cmp(dev, type, ha)) return dev; return NULL; } EXPORT_SYMBOL(dev_getbyhwaddr_rcu); +/** + * dev_getbyhwaddr() - find a device by its hardware address + * @net: the applicable net namespace + * @type: media type of device + * @ha: hardware address + * + * Similar to dev_getbyhwaddr_rcu(), but the owner needs to hold + * rtnl_lock. + * + * Context: rtnl_lock() must be held. 
+ * Return: pointer to the net_device, or NULL if not found + */ +struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, + const char *ha) +{ + struct net_device *dev; + + ASSERT_RTNL(); + for_each_netdev(net, dev) + if (dev_addr_cmp(dev, type, ha)) + return dev; + + return NULL; +} +EXPORT_SYMBOL(dev_getbyhwaddr); + struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) { struct net_device *dev, *ret = NULL; @@ -3679,8 +3710,10 @@ int skb_csum_hwoffload_help(struct sk_buff *skb, if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) && - skb_network_header_len(skb) != sizeof(struct ipv6hdr)) + skb_network_header_len(skb) != sizeof(struct ipv6hdr) && + !ipv6_has_hopopt_jumbo(skb)) goto sw_checksum; + switch (skb->csum_offset) { case offsetof(struct tcphdr, check): case offsetof(struct udphdr, check): @@ -9991,6 +10024,54 @@ void netif_tx_stop_all_queues(struct net_device *dev) } EXPORT_SYMBOL(netif_tx_stop_all_queues); +static int netdev_do_alloc_pcpu_stats(struct net_device *dev) +{ + void __percpu *v; + + /* Drivers implementing ndo_get_peer_dev must support tstat + * accounting, so that skb_do_redirect() can bump the dev's + * RX stats upon network namespace switch. + */ + if (dev->netdev_ops->ndo_get_peer_dev && + dev->pcpu_stat_type != NETDEV_PCPU_STAT_TSTATS) + return -EOPNOTSUPP; + + switch (dev->pcpu_stat_type) { + case NETDEV_PCPU_STAT_NONE: + return 0; + case NETDEV_PCPU_STAT_LSTATS: + v = dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats); + break; + case NETDEV_PCPU_STAT_TSTATS: + v = dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + break; + case NETDEV_PCPU_STAT_DSTATS: + v = dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats); + break; + default: + return -EINVAL; + } + + return v ? 0 : -ENOMEM; +} + +static void netdev_do_free_pcpu_stats(struct net_device *dev) +{ + switch (dev->pcpu_stat_type) { + case NETDEV_PCPU_STAT_NONE: + return; + case NETDEV_PCPU_STAT_LSTATS: + free_percpu(dev->lstats); + break; + case NETDEV_PCPU_STAT_TSTATS: + free_percpu(dev->tstats); + break; + case NETDEV_PCPU_STAT_DSTATS: + free_percpu(dev->dstats); + break; + } +} + /** * register_netdevice() - register a network device * @dev: device to register @@ -10051,11 +10132,15 @@ int register_netdevice(struct net_device *dev) goto err_uninit; } + ret = netdev_do_alloc_pcpu_stats(dev); + if (ret) + goto err_uninit; + ret = -EBUSY; if (!dev->ifindex) dev->ifindex = dev_new_index(net); else if (__dev_get_by_index(net, dev->ifindex)) - goto err_uninit; + goto err_free_pcpu; /* Transfer changeable features to wanted_features and enable * software offloads (GSO and GRO). @@ -10102,14 +10187,14 @@ int register_netdevice(struct net_device *dev) ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); ret = notifier_to_errno(ret); if (ret) - goto err_uninit; + goto err_free_pcpu; ret = netdev_register_kobject(dev); write_lock(&dev_base_lock); dev->reg_state = ret ? 
NETREG_UNREGISTERED : NETREG_REGISTERED; write_unlock(&dev_base_lock); if (ret) - goto err_uninit; + goto err_free_pcpu; __netdev_update_features(dev); @@ -10156,6 +10241,8 @@ int register_netdevice(struct net_device *dev) out: return ret; +err_free_pcpu: + netdev_do_free_pcpu_stats(dev); err_uninit: if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); @@ -10409,6 +10496,7 @@ void netdev_run_todo(void) WARN_ON(rcu_access_pointer(dev->ip_ptr)); WARN_ON(rcu_access_pointer(dev->ip6_ptr)); + netdev_do_free_pcpu_stats(dev); if (dev->priv_destructor) dev->priv_destructor(dev); if (dev->needs_free_netdev) diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 522657b597d9..fef94f3b03de 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -1716,30 +1716,30 @@ static int __init init_net_drop_monitor(void) return -ENOSPC; } - rc = genl_register_family(&net_drop_monitor_family); - if (rc) { - pr_err("Could not create drop monitor netlink family\n"); - return rc; - } - WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT); - - rc = register_netdevice_notifier(&dropmon_net_notifier); - if (rc < 0) { - pr_crit("Failed to register netdevice notifier\n"); - goto out_unreg; - } - - rc = 0; - for_each_possible_cpu(cpu) { net_dm_cpu_data_init(cpu); net_dm_hw_cpu_data_init(cpu); } + rc = register_netdevice_notifier(&dropmon_net_notifier); + if (rc < 0) { + pr_crit("Failed to register netdevice notifier\n"); + return rc; + } + + rc = genl_register_family(&net_drop_monitor_family); + if (rc) { + pr_err("Could not create drop monitor netlink family\n"); + goto out_unreg; + } + WARN_ON(net_drop_monitor_family.mcgrp_offset != NET_DM_GRP_ALERT); + + rc = 0; + goto out; out_unreg: - genl_unregister_family(&net_drop_monitor_family); + WARN_ON(unregister_netdevice_notifier(&dropmon_net_notifier)); out: return rc; } @@ -1748,19 +1748,18 @@ static void exit_net_drop_monitor(void) { int cpu; - BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier)); - /* * Because of the module_get/put we do in the trace state change path * we are guaranteed not to have any current users when we get here */ + BUG_ON(genl_unregister_family(&net_drop_monitor_family)); + + BUG_ON(unregister_netdevice_notifier(&dropmon_net_notifier)); for_each_possible_cpu(cpu) { net_dm_hw_cpu_data_fini(cpu); net_dm_cpu_data_fini(cpu); } - - BUG_ON(genl_unregister_family(&net_drop_monitor_family)); } module_init(init_net_drop_monitor); diff --git a/net/core/filter.c b/net/core/filter.c index 2f6fef5f5864..497b41ac399d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -212,24 +212,36 @@ BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x) return 0; } +static int bpf_skb_load_helper_convert_offset(const struct sk_buff *skb, int offset) +{ + if (likely(offset >= 0)) + return offset; + + if (offset >= SKF_NET_OFF) + return offset - SKF_NET_OFF + skb_network_offset(skb); + + if (offset >= SKF_LL_OFF && skb_mac_header_was_set(skb)) + return offset - SKF_LL_OFF + skb_mac_offset(skb); + + return INT_MIN; +} + BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { - u8 tmp, *ptr; + u8 tmp; const int len = sizeof(tmp); - if (offset >= 0) { - if (headlen - offset >= len) - return *(u8 *)(data + offset); - if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) - return tmp; - } else { - ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); - if (likely(ptr)) - return *(u8 *)ptr; - } + offset = 
bpf_skb_load_helper_convert_offset(skb, offset); + if (offset == INT_MIN) + return -EFAULT; - return -EFAULT; + if (headlen - offset >= len) + return *(u8 *)(data + offset); + if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) + return tmp; + else + return -EFAULT; } BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb, @@ -242,21 +254,19 @@ BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb, BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { - __be16 tmp, *ptr; + __be16 tmp; const int len = sizeof(tmp); - if (offset >= 0) { - if (headlen - offset >= len) - return get_unaligned_be16(data + offset); - if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) - return be16_to_cpu(tmp); - } else { - ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); - if (likely(ptr)) - return get_unaligned_be16(ptr); - } + offset = bpf_skb_load_helper_convert_offset(skb, offset); + if (offset == INT_MIN) + return -EFAULT; - return -EFAULT; + if (headlen - offset >= len) + return get_unaligned_be16(data + offset); + if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) + return be16_to_cpu(tmp); + else + return -EFAULT; } BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb, @@ -269,21 +279,19 @@ BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb, BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { - __be32 tmp, *ptr; + __be32 tmp; const int len = sizeof(tmp); - if (likely(offset >= 0)) { - if (headlen - offset >= len) - return get_unaligned_be32(data + offset); - if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) - return be32_to_cpu(tmp); - } else { - ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); - if (likely(ptr)) - return get_unaligned_be32(ptr); - } + offset = bpf_skb_load_helper_convert_offset(skb, offset); + if (offset == INT_MIN) + return -EFAULT; - return -EFAULT; + if (headlen - offset >= len) + return get_unaligned_be32(data + offset); + if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) + return be32_to_cpu(tmp); + else + return -EFAULT; } BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb, @@ -2491,6 +2499,8 @@ int skb_do_redirect(struct sk_buff *skb) net_eq(net, dev_net(dev)))) goto out_drop; skb->dev = dev; + dev_sw_netstats_rx_add(dev, skb->len); + skb_scrub_packet(skb, false); return -EAGAIN; } return flags & BPF_F_NEIGH ? 
@@ -2594,18 +2604,16 @@ BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes) static void sk_msg_reset_curr(struct sk_msg *msg) { - u32 i = msg->sg.start; - u32 len = 0; + if (!msg->sg.size) { + msg->sg.curr = msg->sg.start; + msg->sg.copybreak = 0; + } else { + u32 i = msg->sg.end; - do { - len += sk_msg_elem(msg, i)->length; - sk_msg_iter_var_next(i); - if (len >= msg->sg.size) - break; - } while (i != msg->sg.end); - - msg->sg.curr = i; - msg->sg.copybreak = 0; + sk_msg_iter_var_prev(i); + msg->sg.curr = i; + msg->sg.copybreak = msg->sg.data[i].length; + } } static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { @@ -2768,7 +2776,7 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, sk_msg_iter_var_next(i); } while (i != msg->sg.end); - if (start >= offset + l) + if (start > offset + l) return -EINVAL; space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); @@ -2793,6 +2801,8 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, raw = page_address(page); + if (i == msg->sg.end) + sk_msg_iter_var_prev(i); psge = sk_msg_elem(msg, i); front = start - offset; back = psge->length - front; @@ -2809,7 +2819,13 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, } put_page(sg_page(psge)); - } else if (start - offset) { + new = i; + goto place_new; + } + + if (start - offset) { + if (i == msg->sg.end) + sk_msg_iter_var_prev(i); psge = sk_msg_elem(msg, i); rsge = sk_msg_elem_cpy(msg, i); @@ -2820,39 +2836,44 @@ BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start, sk_msg_iter_var_next(i); sg_unmark_end(psge); sg_unmark_end(&rsge); - sk_msg_iter_next(msg, end); } /* Slot(s) to place newly allocated data */ + sk_msg_iter_next(msg, end); new = i; + sk_msg_iter_var_next(i); + + if (i == msg->sg.end) { + if (!rsge.length) + goto place_new; + sk_msg_iter_next(msg, end); + goto place_new; + } /* Shift one or two slots as needed */ - if (!copy) { - sge = sk_msg_elem_cpy(msg, i); + sge = sk_msg_elem_cpy(msg, new); + sg_unmark_end(&sge); + nsge = sk_msg_elem_cpy(msg, i); + if (rsge.length) { sk_msg_iter_var_next(i); - sg_unmark_end(&sge); + nnsge = sk_msg_elem_cpy(msg, i); sk_msg_iter_next(msg, end); + } - nsge = sk_msg_elem_cpy(msg, i); + while (i != msg->sg.end) { + msg->sg.data[i] = sge; + sge = nsge; + sk_msg_iter_var_next(i); if (rsge.length) { - sk_msg_iter_var_next(i); + nsge = nnsge; nnsge = sk_msg_elem_cpy(msg, i); - } - - while (i != msg->sg.end) { - msg->sg.data[i] = sge; - sge = nsge; - sk_msg_iter_var_next(i); - if (rsge.length) { - nsge = nnsge; - nnsge = sk_msg_elem_cpy(msg, i); - } else { - nsge = sk_msg_elem_cpy(msg, i); - } + } else { + nsge = sk_msg_elem_cpy(msg, i); } } +place_new: /* Place newly allocated data buffer */ sk_mem_charge(msg->sk, len); msg->sg.size += len; @@ -2881,8 +2902,10 @@ static const struct bpf_func_proto bpf_msg_push_data_proto = { static void sk_msg_shift_left(struct sk_msg *msg, int i) { + struct scatterlist *sge = sk_msg_elem(msg, i); int prev; + put_page(sg_page(sge)); do { prev = i; sk_msg_iter_var_next(i); @@ -2919,6 +2942,9 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, if (unlikely(flags)) return -EINVAL; + if (unlikely(len == 0)) + return 0; + /* First find the starting scatterlist element */ i = msg->sg.start; do { @@ -2931,7 +2957,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, } while (i != msg->sg.end); /* Bounds checks: start and pop must be inside message */ - if (start >= offset + l || last >= msg->sg.size) + if (start >= offset + l || last > 
msg->sg.size) return -EINVAL; space = MAX_MSG_FRAGS - sk_msg_elem_used(msg); @@ -2960,12 +2986,12 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, */ if (start != offset) { struct scatterlist *nsge, *sge = sk_msg_elem(msg, i); - int a = start; + int a = start - offset; int b = sge->length - pop - a; sk_msg_iter_var_next(i); - if (pop < sge->length - a) { + if (b > 0) { if (space) { sge->length = a; sk_msg_shift_right(msg, i); @@ -2984,7 +3010,6 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, if (unlikely(!page)) return -ENOMEM; - sge->length = a; orig = sg_page(sge); from = sg_virt(sge); to = page_address(page); @@ -2994,7 +3019,7 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, put_page(orig); } pop = 0; - } else if (pop >= sge->length - a) { + } else { pop -= (sge->length - a); sge->length = a; } @@ -3028,7 +3053,6 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start, pop -= sge->length; sk_msg_shift_left(msg, i); } - sk_msg_iter_var_next(i); } sk_mem_uncharge(msg->sk, len - pop); @@ -3680,13 +3704,22 @@ static const struct bpf_func_proto bpf_skb_adjust_room_proto = { static u32 __bpf_skb_min_len(const struct sk_buff *skb) { - u32 min_len = skb_network_offset(skb); + int offset = skb_network_offset(skb); + u32 min_len = 0; - if (skb_transport_header_was_set(skb)) - min_len = skb_transport_offset(skb); - if (skb->ip_summed == CHECKSUM_PARTIAL) - min_len = skb_checksum_start_offset(skb) + - skb->csum_offset + sizeof(__sum16); + if (offset > 0) + min_len = offset; + if (skb_transport_header_was_set(skb)) { + offset = skb_transport_offset(skb); + if (offset > 0) + min_len = offset; + } + if (skb->ip_summed == CHECKSUM_PARTIAL) { + offset = skb_checksum_start_offset(skb) + + skb->csum_offset + sizeof(__sum16); + if (offset > 0) + min_len = offset; + } return min_len; } @@ -6219,7 +6252,8 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_INT, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, + .arg3_size = sizeof(u32), .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6230,7 +6264,8 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = { .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, - .arg3_type = ARG_PTR_TO_INT, + .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED, + .arg3_size = sizeof(u32), .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -7503,7 +7538,7 @@ static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = { .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, - .arg2_type = ARG_PTR_TO_MEM, + .arg2_type = ARG_PTR_TO_MEM | MEM_WRITE, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; @@ -11083,6 +11118,7 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY; struct sock_reuseport *reuse; struct sock *selected_sk; + int err; selected_sk = map->ops->map_lookup_elem(map, key); if (!selected_sk) @@ -11090,10 +11126,6 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, reuse = rcu_dereference(selected_sk->sk_reuseport_cb); if (!reuse) { - /* Lookup in sock_map can return TCP ESTABLISHED sockets. */ - if (sk_is_refcounted(selected_sk)) - sock_put(selected_sk); - /* reuseport_array has only sk with non NULL sk_reuseport_cb. 
* The only (!reuse) case here is - the sk has already been * unhashed (e.g. by close()), so treat it as -ENOENT. @@ -11101,24 +11133,33 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, * Other maps (e.g. sock_map) do not provide this guarantee and * the sk may never be in the reuseport group to begin with. */ - return is_sockarray ? -ENOENT : -EINVAL; + err = is_sockarray ? -ENOENT : -EINVAL; + goto error; } if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { struct sock *sk = reuse_kern->sk; - if (sk->sk_protocol != selected_sk->sk_protocol) - return -EPROTOTYPE; - else if (sk->sk_family != selected_sk->sk_family) - return -EAFNOSUPPORT; - - /* Catch all. Likely bound to a different sockaddr. */ - return -EBADFD; + if (sk->sk_protocol != selected_sk->sk_protocol) { + err = -EPROTOTYPE; + } else if (sk->sk_family != selected_sk->sk_family) { + err = -EAFNOSUPPORT; + } else { + /* Catch all. Likely bound to a different sockaddr. */ + err = -EBADFD; + } + goto error; } reuse_kern->selected_sk = selected_sk; return 0; +error: + /* Lookup in sock_map can return TCP ESTABLISHED sockets. */ + if (sk_is_refcounted(selected_sk)) + sock_put(selected_sk); + + return err; } static const struct bpf_func_proto sk_select_reuseport_proto = { diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index fba8eb1bb281..5f50e182acd5 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -751,23 +751,30 @@ __skb_flow_dissect_ports(const struct sk_buff *skb, void *target_container, const void *data, int nhoff, u8 ip_proto, int hlen) { - enum flow_dissector_key_id dissector_ports = FLOW_DISSECTOR_KEY_MAX; - struct flow_dissector_key_ports *key_ports; + struct flow_dissector_key_ports_range *key_ports_range = NULL; + struct flow_dissector_key_ports *key_ports = NULL; + __be32 ports; if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) - dissector_ports = FLOW_DISSECTOR_KEY_PORTS; - else if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS_RANGE)) - dissector_ports = FLOW_DISSECTOR_KEY_PORTS_RANGE; + key_ports = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS, + target_container); - if (dissector_ports == FLOW_DISSECTOR_KEY_MAX) + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS_RANGE)) + key_ports_range = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS_RANGE, + target_container); + + if (!key_ports && !key_ports_range) return; - key_ports = skb_flow_dissector_target(flow_dissector, - dissector_ports, - target_container); - key_ports->ports = __skb_flow_get_ports(skb, nhoff, ip_proto, - data, hlen); + ports = __skb_flow_get_ports(skb, nhoff, ip_proto, data, hlen); + + if (key_ports) + key_ports->ports = ports; + + if (key_ports_range) + key_ports_range->tp.ports = ports; } static void @@ -822,6 +829,7 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, struct flow_dissector *flow_dissector, void *target_container) { + struct flow_dissector_key_ports_range *key_ports_range = NULL; struct flow_dissector_key_ports *key_ports = NULL; struct flow_dissector_key_control *key_control; struct flow_dissector_key_basic *key_basic; @@ -866,20 +874,21 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; } - if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS)) { key_ports = 
skb_flow_dissector_target(flow_dissector, FLOW_DISSECTOR_KEY_PORTS, target_container); - else if (dissector_uses_key(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS_RANGE)) - key_ports = skb_flow_dissector_target(flow_dissector, - FLOW_DISSECTOR_KEY_PORTS_RANGE, - target_container); - - if (key_ports) { key_ports->src = flow_keys->sport; key_ports->dst = flow_keys->dport; } + if (dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS_RANGE)) { + key_ports_range = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_PORTS_RANGE, + target_container); + key_ports_range->tp.src = flow_keys->sport; + key_ports_range->tp.dst = flow_keys->dport; + } if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_FLOW_LABEL)) { @@ -1004,10 +1013,12 @@ bool __skb_flow_dissect(const struct net *net, FLOW_DISSECTOR_KEY_BASIC, target_container); + rcu_read_lock(); + if (skb) { if (!net) { if (skb->dev) - net = dev_net(skb->dev); + net = dev_net_rcu(skb->dev); else if (skb->sk) net = sock_net(skb->sk); } @@ -1018,7 +1029,6 @@ bool __skb_flow_dissect(const struct net *net, enum netns_bpf_attach_type type = NETNS_BPF_FLOW_DISSECTOR; struct bpf_prog_array *run_array; - rcu_read_lock(); run_array = rcu_dereference(init_net.bpf.run_array[type]); if (!run_array) run_array = rcu_dereference(net->bpf.run_array[type]); @@ -1046,17 +1056,17 @@ bool __skb_flow_dissect(const struct net *net, prog = READ_ONCE(run_array->items[0].prog); result = bpf_flow_dissect(prog, &ctx, n_proto, nhoff, hlen, flags); - if (result == BPF_FLOW_DISSECTOR_CONTINUE) - goto dissect_continue; - __skb_flow_bpf_to_target(&flow_keys, flow_dissector, - target_container); - rcu_read_unlock(); - return result == BPF_OK; + if (result != BPF_FLOW_DISSECTOR_CONTINUE) { + __skb_flow_bpf_to_target(&flow_keys, flow_dissector, + target_container); + rcu_read_unlock(); + return result == BPF_OK; + } } -dissect_continue: - rcu_read_unlock(); } + rcu_read_unlock(); + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct ethhdr *eth = eth_hdr(skb); diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 4fcbdd71c59f..e0654e3a9286 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -206,7 +206,7 @@ void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est) { struct net_rate_estimator *est; - est = xchg((__force struct net_rate_estimator **)rate_est, NULL); + est = unrcu_pointer(xchg(rate_est, NULL)); if (est) { del_timer_sync(&est->timer); kfree_rcu(est, rcu); diff --git a/net/core/gro.c b/net/core/gro.c index 47118e97ecfd..c4cbf398c5f7 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -679,6 +679,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) skb->pkt_type = PACKET_HOST; skb->encapsulation = 0; + skb->ip_summed = CHECKSUM_NONE; skb_shinfo(skb)->gso_type = 0; skb_shinfo(skb)->gso_size = 0; if (unlikely(skb->slow_gro)) { diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c index 711cd3b4347a..f63586c9ce02 100644 --- a/net/core/lwtunnel.c +++ b/net/core/lwtunnel.c @@ -23,6 +23,8 @@ #include #include +#include "dev.h" + DEFINE_STATIC_KEY_FALSE(nf_hooks_lwtunnel_enabled); EXPORT_SYMBOL_GPL(nf_hooks_lwtunnel_enabled); @@ -325,82 +327,132 @@ EXPORT_SYMBOL_GPL(lwtunnel_cmp_encap); int lwtunnel_output(struct net *net, struct sock *sk, struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; - int ret = -EINVAL; + struct dst_entry *dst; + int ret; - if (!dst) + 
local_bh_disable(); + + if (dev_xmit_recursion()) { + net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); + ret = -ENETDOWN; goto drop; + } + + dst = skb_dst(skb); + if (!dst) { + ret = -EINVAL; + goto drop; + } lwtstate = dst->lwtstate; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || - lwtstate->type > LWTUNNEL_ENCAP_MAX) - return 0; + lwtstate->type > LWTUNNEL_ENCAP_MAX) { + ret = 0; + goto out; + } ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); - if (likely(ops && ops->output)) + if (likely(ops && ops->output)) { + dev_xmit_recursion_inc(); ret = ops->output(net, sk, skb); + dev_xmit_recursion_dec(); + } rcu_read_unlock(); if (ret == -EOPNOTSUPP) goto drop; - return ret; + goto out; drop: kfree_skb(skb); +out: + local_bh_enable(); return ret; } EXPORT_SYMBOL_GPL(lwtunnel_output); int lwtunnel_xmit(struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; - int ret = -EINVAL; + struct dst_entry *dst; + int ret; - if (!dst) + local_bh_disable(); + + if (dev_xmit_recursion()) { + net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); + ret = -ENETDOWN; goto drop; + } + + dst = skb_dst(skb); + if (!dst) { + ret = -EINVAL; + goto drop; + } lwtstate = dst->lwtstate; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || - lwtstate->type > LWTUNNEL_ENCAP_MAX) - return 0; + lwtstate->type > LWTUNNEL_ENCAP_MAX) { + ret = 0; + goto out; + } ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); - if (likely(ops && ops->xmit)) + if (likely(ops && ops->xmit)) { + dev_xmit_recursion_inc(); ret = ops->xmit(skb); + dev_xmit_recursion_dec(); + } rcu_read_unlock(); if (ret == -EOPNOTSUPP) goto drop; - return ret; + goto out; drop: kfree_skb(skb); +out: + local_bh_enable(); return ret; } EXPORT_SYMBOL_GPL(lwtunnel_xmit); int lwtunnel_input(struct sk_buff *skb) { - struct dst_entry *dst = skb_dst(skb); const struct lwtunnel_encap_ops *ops; struct lwtunnel_state *lwtstate; - int ret = -EINVAL; + struct dst_entry *dst; + int ret; - if (!dst) + DEBUG_NET_WARN_ON_ONCE(!in_softirq()); + + if (dev_xmit_recursion()) { + net_crit_ratelimited("%s(): recursion limit reached on datapath\n", + __func__); + ret = -ENETDOWN; goto drop; + } + + dst = skb_dst(skb); + if (!dst) { + ret = -EINVAL; + goto drop; + } lwtstate = dst->lwtstate; if (lwtstate->type == LWTUNNEL_ENCAP_NONE || @@ -410,8 +462,11 @@ int lwtunnel_input(struct sk_buff *skb) ret = -EOPNOTSUPP; rcu_read_lock(); ops = rcu_dereference(lwtun_encaps[lwtstate->type]); - if (likely(ops && ops->input)) + if (likely(ops && ops->input)) { + dev_xmit_recursion_inc(); ret = ops->input(skb); + dev_xmit_recursion_dec(); + } rcu_read_unlock(); if (ret == -EOPNOTSUPP) diff --git a/net/core/neighbour.c b/net/core/neighbour.c index c842f150c304..bcc3950638b9 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2283,6 +2283,7 @@ static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = { static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = { [NDTPA_IFINDEX] = { .type = NLA_U32 }, [NDTPA_QUEUE_LEN] = { .type = NLA_U32 }, + [NDTPA_QUEUE_LENBYTES] = { .type = NLA_U32 }, [NDTPA_PROXY_QLEN] = { .type = NLA_U32 }, [NDTPA_APP_PROBES] = { .type = NLA_U32 }, [NDTPA_UCAST_PROBES] = { .type = NLA_U32 }, @@ -2865,6 +2866,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) err = neigh_valid_dump_req(nlh, cb->strict_check, &filter, 
cb->extack); if (err < 0 && cb->strict_check) return err; + err = 0; s_t = cb->args[0]; @@ -3497,10 +3499,12 @@ static const struct seq_operations neigh_stat_seq_ops = { static void __neigh_notify(struct neighbour *n, int type, int flags, u32 pid) { - struct net *net = dev_net(n->dev); struct sk_buff *skb; int err = -ENOBUFS; + struct net *net; + rcu_read_lock(); + net = dev_net_rcu(n->dev); skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC); if (skb == NULL) goto errout; @@ -3513,10 +3517,11 @@ static void __neigh_notify(struct neighbour *n, int type, int flags, goto errout; } rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC); - return; + goto out; errout: - if (err < 0) - rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); + rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); +out: + rcu_read_unlock(); } void neigh_app_ns(struct neighbour *n) diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 1d95a5adce4e..abf1e1751d6c 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -314,8 +314,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) { /* Must be called with pernet_ops_rwsem held */ const struct pernet_operations *ops, *saved_ops; - int error = 0; LIST_HEAD(net_exit_list); + LIST_HEAD(dev_kill_list); + int error = 0; refcount_set(&net->ns.count, 1); ref_tracker_dir_init(&net->refcnt_tracker, 128); @@ -353,6 +354,15 @@ out_undo: synchronize_rcu(); + ops = saved_ops; + rtnl_lock(); + list_for_each_entry_continue_reverse(ops, &pernet_list, list) { + if (ops->exit_batch_rtnl) + ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list); + } + unregister_netdevice_many(&dev_kill_list); + rtnl_unlock(); + ops = saved_ops; list_for_each_entry_continue_reverse(ops, &pernet_list, list) ops_exit_list(ops, &net_exit_list); @@ -435,11 +445,28 @@ out_free: goto out; } +static LLIST_HEAD(defer_free_list); + +static void net_complete_free(void) +{ + struct llist_node *kill_list; + struct net *net, *next; + + /* Get the list of namespaces to free from last round. */ + kill_list = llist_del_all(&defer_free_list); + + llist_for_each_entry_safe(net, next, kill_list, defer_free_list) + kmem_cache_free(net_cachep, net); + +} + static void net_free(struct net *net) { if (refcount_dec_and_test(&net->passive)) { kfree(rcu_access_pointer(net->gen)); - kmem_cache_free(net_cachep, net); + + /* Wait for an extra rcu_barrier() before final free. 
*/ + llist_add(&net->defer_free_list, &defer_free_list); } } @@ -559,6 +586,7 @@ static void cleanup_net(struct work_struct *work) struct net *net, *tmp, *last; struct llist_node *net_kill_list; LIST_HEAD(net_exit_list); + LIST_HEAD(dev_kill_list); /* Atomically snapshot the list of namespaces to cleanup */ net_kill_list = llist_del_all(&cleanup_list); @@ -599,6 +627,14 @@ static void cleanup_net(struct work_struct *work) */ synchronize_rcu(); + rtnl_lock(); + list_for_each_entry_reverse(ops, &pernet_list, list) { + if (ops->exit_batch_rtnl) + ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list); + } + unregister_netdevice_many(&dev_kill_list); + rtnl_unlock(); + /* Run all of the network namespace exit methods */ list_for_each_entry_reverse(ops, &pernet_list, list) ops_exit_list(ops, &net_exit_list); @@ -614,6 +650,8 @@ static void cleanup_net(struct work_struct *work) */ rcu_barrier(); + net_complete_free(); + /* Finally it is safe to free my network namespace structure */ list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { list_del_init(&net->exit_list); @@ -1140,7 +1178,17 @@ static void free_exit_list(struct pernet_operations *ops, struct list_head *net_ { ops_pre_exit_list(ops, net_exit_list); synchronize_rcu(); + + if (ops->exit_batch_rtnl) { + LIST_HEAD(dev_kill_list); + + rtnl_lock(); + ops->exit_batch_rtnl(net_exit_list, &dev_kill_list); + unregister_netdevice_many(&dev_kill_list); + rtnl_unlock(); + } ops_exit_list(ops, net_exit_list); + ops_free_list(ops, net_exit_list); } diff --git a/net/core/netpoll.c b/net/core/netpoll.c index fd2195cfcb4a..657abbb7d0d7 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -326,6 +326,7 @@ static int netpoll_owner_active(struct net_device *dev) static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) { netdev_tx_t status = NETDEV_TX_BUSY; + netdev_tx_t ret = NET_XMIT_DROP; struct net_device *dev; unsigned long tries; /* It is up to the caller to keep npinfo alive. */ @@ -334,11 +335,12 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) lockdep_assert_irqs_disabled(); dev = np->dev; + rcu_read_lock(); npinfo = rcu_dereference_bh(dev->npinfo); if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { dev_kfree_skb_irq(skb); - return NET_XMIT_DROP; + goto out; } /* don't get messages out of order, and no recursion */ @@ -377,7 +379,10 @@ static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) skb_queue_tail(&npinfo->txq, skb); schedule_delayed_work(&npinfo->tx_work,0); } - return NETDEV_TX_OK; + ret = NETDEV_TX_OK; +out: + rcu_read_unlock(); + return ret; } netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) @@ -636,7 +641,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) goto out; } - if (!ndev->npinfo) { + if (!rcu_access_pointer(ndev->npinfo)) { npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); if (!npinfo) { err = -ENOMEM; diff --git a/net/core/page_pool.c b/net/core/page_pool.c index caf6d950d54a..acc1d0d055cd 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -841,7 +841,13 @@ static void page_pool_release_retry(struct work_struct *wq) int inflight; inflight = page_pool_release(pool); - if (!inflight) + /* In rare cases, a driver bug may cause inflight to go negative. + * Don't reschedule release if inflight is 0 or negative. + * - If 0, the page_pool has been destroyed + * - if negative, we will never recover + * in both cases no reschedule is necessary. 
+ */ + if (inflight <= 0) return; /* Periodic warning */ diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 471d4effa8b4..a2838c15aa9d 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -850,6 +850,9 @@ static ssize_t get_imix_entries(const char __user *buffer, unsigned long weight; unsigned long size; + if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES) + return -E2BIG; + len = num_arg(&buffer[i], max_digits, &size); if (len < 0) return len; @@ -879,9 +882,6 @@ static ssize_t get_imix_entries(const char __user *buffer, i++; pkt_dev->n_imix_entries++; - - if (pkt_dev->n_imix_entries > MAX_IMIX_ENTRIES) - return -E2BIG; } while (c == ' '); return i; @@ -897,6 +897,10 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) pkt_dev->nr_labels = 0; do { __u32 tmp; + + if (n >= MAX_MPLS_LABELS) + return -E2BIG; + len = hex32_arg(&buffer[i], 8, &tmp); if (len <= 0) return len; @@ -908,8 +912,6 @@ static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) return -EFAULT; i++; n++; - if (n >= MAX_MPLS_LABELS) - return -E2BIG; } while (c == ','); pkt_dev->nr_labels = n; @@ -1875,8 +1877,8 @@ static ssize_t pktgen_thread_write(struct file *file, i = len; /* Read variable name */ - - len = strn_len(&user_buffer[i], sizeof(name) - 1); + max = min(sizeof(name) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) return len; @@ -1906,7 +1908,8 @@ static ssize_t pktgen_thread_write(struct file *file, if (!strcmp(name, "add_device")) { char f[32]; memset(f, 0, 32); - len = strn_len(&user_buffer[i], sizeof(f) - 1); + max = min(sizeof(f) - 1, count - i); + len = strn_len(&user_buffer[i], max); if (len < 0) { ret = len; goto out; diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index afb52254a47e..6bc2f78a5ebb 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1007,6 +1007,9 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, /* IFLA_VF_STATS_TX_DROPPED */ nla_total_size_64bit(sizeof(__u64))); } + if (dev->netdev_ops->ndo_get_vf_guid) + size += num_vfs * 2 * + nla_total_size(sizeof(struct ifla_vf_guid)); return size; } else return 0; @@ -1939,7 +1942,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 }, [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 }, [IFLA_GSO_MAX_SEGS] = { .type = NLA_U32 }, - [IFLA_GSO_MAX_SIZE] = { .type = NLA_U32 }, + [IFLA_GSO_MAX_SIZE] = NLA_POLICY_MIN(NLA_U32, MAX_TCP_HEADER + 1), [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, [IFLA_CARRIER_CHANGES] = { .type = NLA_U32 }, /* ignored */ [IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN }, diff --git a/net/core/scm.c b/net/core/scm.c index a877c4ef4c25..cdd4e5befb14 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -36,6 +36,7 @@ #include #include #include +#include /* @@ -85,8 +86,15 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) return -ENOMEM; *fplp = fpl; fpl->count = 0; + fpl->count_unix = 0; fpl->max = SCM_MAX_FD; fpl->user = NULL; +#if IS_ENABLED(CONFIG_UNIX) + fpl->inflight = false; + fpl->dead = false; + fpl->edges = NULL; + INIT_LIST_HEAD(&fpl->vertices); +#endif } fpp = &fpl->fp[fpl->count]; @@ -109,6 +117,9 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) fput(file); return -EINVAL; } + if (unix_get_socket(file)) + fpl->count_unix++; + *fpp++ = file; fpl->count++; } @@ -367,8 +378,14 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) if (new_fpl) { 
for (i = 0; i < fpl->count; i++) get_file(fpl->fp[i]); + new_fpl->max = new_fpl->count; new_fpl->user = get_uid(fpl->user); +#if IS_ENABLED(CONFIG_UNIX) + new_fpl->inflight = false; + new_fpl->edges = NULL; + INIT_LIST_HEAD(&new_fpl->vertices); +#endif } return new_fpl; } diff --git a/net/core/selftests.c b/net/core/selftests.c index acb1ee97bbd3..7af99d07762e 100644 --- a/net/core/selftests.c +++ b/net/core/selftests.c @@ -100,10 +100,10 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev, ehdr->h_proto = htons(ETH_P_IP); if (attr->tcp) { + memset(thdr, 0, sizeof(*thdr)); thdr->source = htons(attr->sport); thdr->dest = htons(attr->dport); thdr->doff = sizeof(struct tcphdr) / 4; - thdr->check = 0; } else { uhdr->source = htons(attr->sport); uhdr->dest = htons(attr->dport); @@ -144,10 +144,18 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev, attr->id = net_test_next_id; shdr->id = net_test_next_id++; - if (attr->size) - skb_put(skb, attr->size); - if (attr->max_size && attr->max_size > skb->len) - skb_put(skb, attr->max_size - skb->len); + if (attr->size) { + void *payload = skb_put(skb, attr->size); + + memset(payload, 0, attr->size); + } + + if (attr->max_size && attr->max_size > skb->len) { + size_t pad_len = attr->max_size - skb->len; + void *pad = skb_put(skb, pad_len); + + memset(pad, 0, pad_len); + } skb->csum = 0; skb->ip_summed = CHECKSUM_PARTIAL; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 768b8d65a5ba..d8a3ada886ff 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5556,11 +5556,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet) skb->offload_fwd_mark = 0; skb->offload_l3_fwd_mark = 0; #endif + ipvs_reset(skb); if (!xnet) return; - ipvs_reset(skb); skb->mark = 0; skb_clear_tstamp(skb); } diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 746d950de0e1..5a790cd1121b 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -444,8 +444,10 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, if (likely(!peek)) { sge->offset += copy; sge->length -= copy; - if (!msg_rx->skb) + if (!msg_rx->skb) { sk_mem_uncharge(sk, copy); + atomic_sub(copy, &sk->sk_rmem_alloc); + } msg_rx->sg.size -= copy; if (!sge->length) { @@ -545,6 +547,9 @@ static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb, return num_sge; } +#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) + psock->ingress_bytes += len; +#endif copied = len; msg->sg.start = 0; msg->sg.size = copied; @@ -771,6 +776,8 @@ static void __sk_psock_purge_ingress_msg(struct sk_psock *psock) list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) { list_del(&msg->list); + if (!msg->skb) + atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc); sk_msg_free(psock->sk, msg); kfree(msg); } @@ -1114,9 +1121,9 @@ static void sk_psock_strp_data_ready(struct sock *sk) if (tls_sw_has_ctx_rx(sk)) { psock->saved_data_ready(sk); } else { - write_lock_bh(&sk->sk_callback_lock); + read_lock_bh(&sk->sk_callback_lock); strp_data_ready(&psock->strp); - write_unlock_bh(&sk->sk_callback_lock); + read_unlock_bh(&sk->sk_callback_lock); } } rcu_read_unlock(); @@ -1136,6 +1143,10 @@ int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) if (!ret) sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED); + if (sk_is_tcp(sk)) { + psock->strp.cb.read_sock = tcp_bpf_strp_read_sock; + psock->copied_seq = tcp_sk(sk)->copied_seq; + } return ret; } diff --git a/net/core/sock.c b/net/core/sock.c index dce8f878f638..168e7f42c054 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ 
-1122,7 +1122,10 @@ int sk_setsockopt(struct sock *sk, int level, int optname, sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE); break; case SO_REUSEPORT: - sk->sk_reuseport = valbool; + if (valbool && !sk_is_inet(sk)) + ret = -EOPNOTSUPP; + else + sk->sk_reuseport = valbool; break; case SO_TYPE: case SO_PROTOCOL: diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index e6ea6764d10a..17972558f54a 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -16,9 +16,10 @@ #include #include -static const struct sock_diag_handler *sock_diag_handlers[AF_MAX]; -static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); -static DEFINE_MUTEX(sock_diag_table_mutex); +static const struct sock_diag_handler __rcu *sock_diag_handlers[AF_MAX]; + +static const struct sock_diag_inet_compat __rcu *inet_rcv_compat; + static struct workqueue_struct *broadcast_wq; DEFINE_COOKIE(sock_cookie); @@ -119,6 +120,24 @@ static size_t sock_diag_nlmsg_size(void) + nla_total_size_64bit(sizeof(struct tcp_info))); /* INET_DIAG_INFO */ } +static const struct sock_diag_handler *sock_diag_lock_handler(int family) +{ + const struct sock_diag_handler *handler; + + rcu_read_lock(); + handler = rcu_dereference(sock_diag_handlers[family]); + if (handler && !try_module_get(handler->owner)) + handler = NULL; + rcu_read_unlock(); + + return handler; +} + +static void sock_diag_unlock_handler(const struct sock_diag_handler *handler) +{ + module_put(handler->owner); +} + static void sock_diag_broadcast_destroy_work(struct work_struct *work) { struct broadcast_sk *bsk = @@ -135,12 +154,12 @@ static void sock_diag_broadcast_destroy_work(struct work_struct *work) if (!skb) goto out; - mutex_lock(&sock_diag_table_mutex); - hndl = sock_diag_handlers[sk->sk_family]; - if (hndl && hndl->get_info) - err = hndl->get_info(skb, sk); - mutex_unlock(&sock_diag_table_mutex); - + hndl = sock_diag_lock_handler(sk->sk_family); + if (hndl) { + if (hndl->get_info) + err = hndl->get_info(skb, sk); + sock_diag_unlock_handler(hndl); + } if (!err) nlmsg_multicast(sock_net(sk)->diag_nlsk, skb, 0, group, GFP_KERNEL); @@ -163,51 +182,43 @@ void sock_diag_broadcast_destroy(struct sock *sk) queue_work(broadcast_wq, &bsk->work); } -void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)) +void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr) { - mutex_lock(&sock_diag_table_mutex); - inet_rcv_compat = fn; - mutex_unlock(&sock_diag_table_mutex); + xchg(&inet_rcv_compat, RCU_INITIALIZER(ptr)); } EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat); -void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)) +void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr) { - mutex_lock(&sock_diag_table_mutex); - inet_rcv_compat = NULL; - mutex_unlock(&sock_diag_table_mutex); + const struct sock_diag_inet_compat *old; + + old = unrcu_pointer(xchg(&inet_rcv_compat, NULL)); + WARN_ON_ONCE(old != ptr); } EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat); int sock_diag_register(const struct sock_diag_handler *hndl) { - int err = 0; + int family = hndl->family; - if (hndl->family >= AF_MAX) + if (family >= AF_MAX) return -EINVAL; - mutex_lock(&sock_diag_table_mutex); - if (sock_diag_handlers[hndl->family]) - err = -EBUSY; - else - WRITE_ONCE(sock_diag_handlers[hndl->family], hndl); - mutex_unlock(&sock_diag_table_mutex); - - return err; + return !cmpxchg((const struct sock_diag_handler **) + &sock_diag_handlers[family], + NULL, hndl) ? 
0 : -EBUSY; } EXPORT_SYMBOL_GPL(sock_diag_register); -void sock_diag_unregister(const struct sock_diag_handler *hnld) +void sock_diag_unregister(const struct sock_diag_handler *hndl) { - int family = hnld->family; + int family = hndl->family; if (family >= AF_MAX) return; - mutex_lock(&sock_diag_table_mutex); - BUG_ON(sock_diag_handlers[family] != hnld); - WRITE_ONCE(sock_diag_handlers[family], NULL); - mutex_unlock(&sock_diag_table_mutex); + xchg((const struct sock_diag_handler **)&sock_diag_handlers[family], + NULL); } EXPORT_SYMBOL_GPL(sock_diag_unregister); @@ -224,20 +235,20 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh) return -EINVAL; req->sdiag_family = array_index_nospec(req->sdiag_family, AF_MAX); - if (READ_ONCE(sock_diag_handlers[req->sdiag_family]) == NULL) + if (!rcu_access_pointer(sock_diag_handlers[req->sdiag_family])) sock_load_diag_module(req->sdiag_family, 0); - mutex_lock(&sock_diag_table_mutex); - hndl = sock_diag_handlers[req->sdiag_family]; + hndl = sock_diag_lock_handler(req->sdiag_family); if (hndl == NULL) - err = -ENOENT; - else if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY) + return -ENOENT; + + if (nlh->nlmsg_type == SOCK_DIAG_BY_FAMILY) err = hndl->dump(skb, nlh); else if (nlh->nlmsg_type == SOCK_DESTROY && hndl->destroy) err = hndl->destroy(skb, nlh); else err = -EOPNOTSUPP; - mutex_unlock(&sock_diag_table_mutex); + sock_diag_unlock_handler(hndl); return err; } @@ -245,20 +256,27 @@ static int __sock_diag_cmd(struct sk_buff *skb, struct nlmsghdr *nlh) static int sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { + const struct sock_diag_inet_compat *ptr; int ret; switch (nlh->nlmsg_type) { case TCPDIAG_GETSOCK: case DCCPDIAG_GETSOCK: - if (inet_rcv_compat == NULL) + + if (!rcu_access_pointer(inet_rcv_compat)) sock_load_diag_module(AF_INET, 0); - mutex_lock(&sock_diag_table_mutex); - if (inet_rcv_compat != NULL) - ret = inet_rcv_compat(skb, nlh); - else - ret = -EOPNOTSUPP; - mutex_unlock(&sock_diag_table_mutex); + rcu_read_lock(); + ptr = rcu_dereference(inet_rcv_compat); + if (ptr && !try_module_get(ptr->owner)) + ptr = NULL; + rcu_read_unlock(); + + ret = -EOPNOTSUPP; + if (ptr) { + ret = ptr->fn(skb, nlh); + module_put(ptr->owner); + } return ret; case SOCK_DIAG_BY_FAMILY: @@ -283,12 +301,12 @@ static int sock_diag_bind(struct net *net, int group) switch (group) { case SKNLGRP_INET_TCP_DESTROY: case SKNLGRP_INET_UDP_DESTROY: - if (!READ_ONCE(sock_diag_handlers[AF_INET])) + if (!rcu_access_pointer(sock_diag_handlers[AF_INET])) sock_load_diag_module(AF_INET, 0); break; case SKNLGRP_INET6_TCP_DESTROY: case SKNLGRP_INET6_UDP_DESTROY: - if (!READ_ONCE(sock_diag_handlers[AF_INET6])) + if (!rcu_access_pointer(sock_diag_handlers[AF_INET6])) sock_load_diag_module(AF_INET6, 0); break; } diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 25b5abab60ed..f4cca477b047 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -158,6 +158,7 @@ static void sock_map_del_link(struct sock *sk, verdict_stop = true; list_del(&link->list); sk_psock_free_link(link); + break; } } spin_unlock_bh(&psock->link_lock); @@ -410,15 +411,15 @@ static void *sock_map_lookup_sys(struct bpf_map *map, void *key) static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test, struct sock **psk) { - struct sock *sk; + struct sock *sk = NULL; int err = 0; if (irqs_disabled()) return -EOPNOTSUPP; /* locks here are hardirq-unsafe */ raw_spin_lock_bh(&stab->lock); - sk = *psk; - if (!sk_test || sk_test == sk) + 
+ if (!sk_test || sk_test == *psk) sk = xchg(psk, NULL); if (likely(sk)) diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index d281d5343ff4..75efc712bb9b 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -30,6 +30,7 @@ static int min_sndbuf = SOCK_MIN_SNDBUF; static int min_rcvbuf = SOCK_MIN_RCVBUF; static int max_skb_frags = MAX_SKB_FRAGS; static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE; +static int netdev_budget_usecs_min = 2 * USEC_PER_SEC / HZ; static int net_msg_warn; /* Unused, but still a sysctl */ @@ -238,7 +239,7 @@ static int proc_do_dev_weight(struct ctl_table *table, int write, int ret, weight; mutex_lock(&dev_weight_mutex); - ret = proc_dointvec(table, write, buffer, lenp, ppos); + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (!ret && write) { weight = READ_ONCE(weight_p); WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias); @@ -363,6 +364,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_do_dev_weight, + .extra1 = SYSCTL_ONE, }, { .procname = "dev_weight_rx_bias", @@ -370,6 +372,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_do_dev_weight, + .extra1 = SYSCTL_ONE, }, { .procname = "dev_weight_tx_bias", @@ -377,6 +380,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_do_dev_weight, + .extra1 = SYSCTL_ONE, }, { .procname = "netdev_max_backlog", @@ -551,7 +555,7 @@ static struct ctl_table net_core_table[] = { .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, + .extra1 = &netdev_budget_usecs_min, }, { .procname = "fb_tunnels_only_for_init_net", diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 54086bb05c42..f7554dcdaaba 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -1166,8 +1166,12 @@ static u8 dccp_feat_change_recv(struct list_head *fn, u8 is_mandatory, u8 opt, goto not_valid_or_not_known; } - return dccp_feat_push_confirm(fn, feat, local, &fval); + if (dccp_feat_push_confirm(fn, feat, local, &fval)) { + kfree(fval.sp.vec); + return DCCP_RESET_CODE_TOO_BUSY; + } + return 0; } else if (entry->state == FEAT_UNSTABLE) { /* 6.6.2 */ return 0; } diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index d90bb941f2ad..8f5f56b1e5f8 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -615,7 +615,7 @@ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) by tcp. Feel free to propose better solution. 
--ANK (980728) */ - if (np->rxopt.all) + if (np->rxopt.all && sk->sk_state != DCCP_LISTEN) opt_skb = skb_clone_and_charge_r(skb, sk); if (sk->sk_state == DCCP_OPEN) { /* Fast path */ diff --git a/net/dsa/port.c b/net/dsa/port.c index 750fe68d9b2a..cd1741792f6a 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -1599,6 +1599,21 @@ dsa_port_phylink_mac_select_pcs(struct phylink_config *config, return pcs; } +static int dsa_port_phylink_mac_prepare(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); + struct dsa_switch *ds = dp->ds; + int err = 0; + + if (ds->ops->phylink_mac_prepare) + err = ds->ops->phylink_mac_prepare(ds, dp->index, mode, + interface); + + return err; +} + static void dsa_port_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) @@ -1612,6 +1627,21 @@ static void dsa_port_phylink_mac_config(struct phylink_config *config, ds->ops->phylink_mac_config(ds, dp->index, mode, state); } +static int dsa_port_phylink_mac_finish(struct phylink_config *config, + unsigned int mode, + phy_interface_t interface) +{ + struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); + struct dsa_switch *ds = dp->ds; + int err = 0; + + if (ds->ops->phylink_mac_finish) + err = ds->ops->phylink_mac_finish(ds, dp->index, mode, + interface); + + return err; +} + static void dsa_port_phylink_mac_an_restart(struct phylink_config *config) { struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); @@ -1667,7 +1697,9 @@ static const struct phylink_mac_ops dsa_port_phylink_mac_ops = { .validate = dsa_port_phylink_validate, .mac_select_pcs = dsa_port_phylink_mac_select_pcs, .mac_pcs_get_state = dsa_port_phylink_mac_pcs_get_state, + .mac_prepare = dsa_port_phylink_mac_prepare, .mac_config = dsa_port_phylink_mac_config, + .mac_finish = dsa_port_phylink_mac_finish, .mac_an_restart = dsa_port_phylink_mac_an_restart, .mac_link_down = dsa_port_phylink_mac_link_down, .mac_link_up = dsa_port_phylink_mac_link_up, diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 5fe075bf479e..caeb7e75b287 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -2592,13 +2592,14 @@ EXPORT_SYMBOL_GPL(dsa_slave_dev_check); static int dsa_slave_changeupper(struct net_device *dev, struct netdev_notifier_changeupper_info *info) { - struct dsa_port *dp = dsa_slave_to_port(dev); struct netlink_ext_ack *extack; int err = NOTIFY_DONE; + struct dsa_port *dp; if (!dsa_slave_dev_check(dev)) return err; + dp = dsa_slave_to_port(dev); extack = netdev_notifier_info_to_extack(&info->info); if (netif_is_bridge_master(info->upper_dev)) { @@ -2652,11 +2653,13 @@ static int dsa_slave_changeupper(struct net_device *dev, static int dsa_slave_prechangeupper(struct net_device *dev, struct netdev_notifier_changeupper_info *info) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp; if (!dsa_slave_dev_check(dev)) return NOTIFY_DONE; + dp = dsa_slave_to_port(dev); + if (netif_is_bridge_master(info->upper_dev) && !info->linking) dsa_port_pre_bridge_leave(dp, info->upper_dev); else if (netif_is_lag_master(info->upper_dev) && !info->linking) diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 89371b16416e..6a33f35a0f74 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -180,7 +180,7 @@ static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid) err = ds->ops->tag_8021q_vlan_del(ds, port, vid); if (err) { - 
refcount_inc(&v->refcount); + refcount_set(&v->refcount, 1); return err; } diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c index 0515d6604b3b..f0883357d12e 100644 --- a/net/ethtool/bitset.c +++ b/net/ethtool/bitset.c @@ -425,12 +425,32 @@ static int ethnl_parse_bit(unsigned int *index, bool *val, unsigned int nbits, return 0; } +/** + * ethnl_bitmap32_equal() - Compare two bitmaps + * @map1: first bitmap + * @map2: second bitmap + * @nbits: bit size to compare + * + * Return: true if first @nbits are equal, false if not + */ +static bool ethnl_bitmap32_equal(const u32 *map1, const u32 *map2, + unsigned int nbits) +{ + if (memcmp(map1, map2, nbits / 32 * sizeof(u32))) + return false; + if (nbits % 32 == 0) + return true; + return !((map1[nbits / 32] ^ map2[nbits / 32]) & + ethnl_lower_bits(nbits % 32)); +} + static int ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits, const struct nlattr *attr, struct nlattr **tb, ethnl_string_array_t names, struct netlink_ext_ack *extack, bool *mod) { + u32 *saved_bitmap = NULL; struct nlattr *bit_attr; bool no_mask; int rem; @@ -448,8 +468,20 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits, } no_mask = tb[ETHTOOL_A_BITSET_NOMASK]; - if (no_mask) - ethnl_bitmap32_clear(bitmap, 0, nbits, mod); + if (no_mask) { + unsigned int nwords = DIV_ROUND_UP(nbits, 32); + unsigned int nbytes = nwords * sizeof(u32); + bool dummy; + + /* The bitmap size is only the size of the map part without + * its mask part. + */ + saved_bitmap = kcalloc(nwords, sizeof(u32), GFP_KERNEL); + if (!saved_bitmap) + return -ENOMEM; + memcpy(saved_bitmap, bitmap, nbytes); + ethnl_bitmap32_clear(bitmap, 0, nbits, &dummy); + } nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) { bool old_val, new_val; @@ -458,22 +490,30 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits, if (nla_type(bit_attr) != ETHTOOL_A_BITSET_BITS_BIT) { NL_SET_ERR_MSG_ATTR(extack, bit_attr, "only ETHTOOL_A_BITSET_BITS_BIT allowed in ETHTOOL_A_BITSET_BITS"); + kfree(saved_bitmap); return -EINVAL; } ret = ethnl_parse_bit(&idx, &new_val, nbits, bit_attr, no_mask, names, extack); - if (ret < 0) + if (ret < 0) { + kfree(saved_bitmap); return ret; + } old_val = bitmap[idx / 32] & ((u32)1 << (idx % 32)); if (new_val != old_val) { if (new_val) bitmap[idx / 32] |= ((u32)1 << (idx % 32)); else bitmap[idx / 32] &= ~((u32)1 << (idx % 32)); - *mod = true; + if (!no_mask) + *mod = true; } } + if (no_mask && !ethnl_bitmap32_equal(saved_bitmap, bitmap, nbits)) + *mod = true; + + kfree(saved_bitmap); return 0; } diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index fc4ccecf9495..98c064113882 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -41,7 +41,7 @@ int ethnl_ops_begin(struct net_device *dev) pm_runtime_get_sync(dev->dev.parent); if (!netif_device_present(dev) || - dev->reg_state == NETREG_UNREGISTERING) { + dev->reg_state >= NETREG_UNREGISTERING) { ret = -ENODEV; goto err; } @@ -384,7 +384,7 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info) ret = ops->prepare_data(req_info, reply_data, info); rtnl_unlock(); if (ret < 0) - goto err_cleanup; + goto err_dev; ret = ops->reply_size(req_info, reply_data); if (ret < 0) goto err_cleanup; @@ -442,7 +442,7 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, ret = ctx->ops->prepare_data(ctx->req_info, ctx->reply_data, NULL); rtnl_unlock(); if (ret < 0) - goto out; + goto out_cancel; ret = ethnl_fill_reply_header(skb, dev, ctx->ops->hdr_attr); if 
(ret < 0) goto out; @@ -451,6 +451,7 @@ static int ethnl_default_dump_one(struct sk_buff *skb, struct net_device *dev, out: if (ctx->ops->cleanup_data) ctx->ops->cleanup_data(ctx->reply_data); +out_cancel: ctx->reply_data->dev = NULL; if (ret < 0) genlmsg_cancel(skb, ehdr); @@ -636,7 +637,7 @@ static void ethnl_default_notify(struct net_device *dev, unsigned int cmd, ethnl_init_reply_data(reply_data, ops, dev); ret = ops->prepare_data(req_info, reply_data, NULL); if (ret < 0) - goto err_cleanup; + goto err_rep; ret = ops->reply_size(req_info, reply_data); if (ret < 0) goto err_cleanup; @@ -671,6 +672,7 @@ err_skb: err_cleanup: if (ops->cleanup_data) ops->cleanup_data(reply_data); +err_rep: kfree(reply_data); kfree(req_info); return; diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index ad75724b69ad..6e434af189bc 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -253,6 +253,8 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master) skb->dev = master->dev; skb->priority = TC_PRIO_CONTROL; + skb_reset_network_header(skb); + skb_reset_transport_header(skb); if (dev_hard_header(skb, skb->dev, ETH_P_PRP, hsr->sup_multicast_addr, skb->dev->dev_addr, skb->len) <= 0) @@ -260,8 +262,6 @@ static struct sk_buff *hsr_init_skb(struct hsr_port *master) skb_reset_mac_header(skb); skb_reset_mac_len(skb); - skb_reset_network_header(skb); - skb_reset_transport_header(skb); return skb; out: diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 0323ab5023c6..9317f96127c1 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -588,7 +588,12 @@ static int fill_frame_info(struct hsr_frame_info *frame, frame->is_vlan = true; if (frame->is_vlan) { - vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr; + /* Note: skb->mac_len might be wrong here. */ + if (!pskb_may_pull(skb, + skb_mac_offset(skb) + + offsetofend(struct hsr_vlan_ethhdr, vlanhdr))) + return -EINVAL; + vlan_hdr = (struct hsr_vlan_ethhdr *)skb_mac_header(skb); proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto; /* FIXME: */ netdev_warn_once(skb->dev, "VLAN not yet supported"); diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c index 1fa2fe041ec0..9f74be2aad00 100644 --- a/net/ieee802154/socket.c +++ b/net/ieee802154/socket.c @@ -1046,19 +1046,21 @@ static int ieee802154_create(struct net *net, struct socket *sock, if (sk->sk_prot->hash) { rc = sk->sk_prot->hash(sk); - if (rc) { - sk_common_release(sk); - goto out; - } + if (rc) + goto out_sk_release; } if (sk->sk_prot->init) { rc = sk->sk_prot->init(sk); if (rc) - sk_common_release(sk); + goto out_sk_release; } out: return rc; +out_sk_release: + sk_common_release(sk); + sock->sk = NULL; + goto out; } static const struct net_proto_family ieee802154_family_ops = { diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index cc013be9b02c..4f3c932e37ed 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -371,32 +371,30 @@ lookup_protocol: inet->inet_sport = htons(inet->inet_num); /* Add to protocol hash chains. 
*/ err = sk->sk_prot->hash(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } if (sk->sk_prot->init) { err = sk->sk_prot->init(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } if (!kern) { err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } out: return err; out_rcu_unlock: rcu_read_unlock(); goto out; +out_sk_release: + sk_common_release(sk); + sock->sk = NULL; + goto out; } diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index ccff96820a70..50e2b4939d8e 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -658,10 +658,12 @@ static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb */ void arp_xmit(struct sk_buff *skb) { + rcu_read_lock(); /* Send it off, maybe filter it using firewalling first. */ NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, - dev_net(skb->dev), NULL, skb, NULL, skb->dev, + dev_net_rcu(skb->dev), NULL, skb, NULL, skb->dev, arp_xmit_finish); + rcu_read_unlock(); } EXPORT_SYMBOL(arp_xmit); @@ -1028,7 +1030,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r, if (mask && mask != htonl(0xFFFFFFFF)) return -EINVAL; if (!dev && (r->arp_flags & ATF_COM)) { - dev = dev_getbyhwaddr_rcu(net, r->arp_ha.sa_family, + dev = dev_getbyhwaddr(net, r->arp_ha.sa_family, r->arp_ha.sa_data); if (!dev) return -ENODEV; diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 2b56cabe4da9..fd139daf9dfb 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1955,7 +1955,7 @@ int cipso_v4_req_setattr(struct request_sock *req, buf = NULL; req_inet = inet_rsk(req); - opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt); + opt = unrcu_pointer(xchg(&req_inet->ireq_opt, RCU_INITIALIZER(opt))); if (opt) kfree_rcu(opt, rcu); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 430ca93ba939..1738cc2bfc7f 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -1320,10 +1320,11 @@ __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope) __be32 addr = 0; unsigned char localnet_scope = RT_SCOPE_HOST; struct in_device *in_dev; - struct net *net = dev_net(dev); + struct net *net; int master_idx; rcu_read_lock(); + net = dev_net_rcu(dev); in_dev = __in_dev_get_rcu(dev); if (!in_dev) goto no_in_dev; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 419969b26822..8f5417ff355d 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -118,47 +118,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp) } #ifdef CONFIG_INET_ESPINTCP -struct esp_tcp_sk { - struct sock *sk; - struct rcu_head rcu; -}; - -static void esp_free_tcp_sk(struct rcu_head *head) -{ - struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); - - sock_put(esk->sk); - kfree(esk); -} - static struct sock *esp_find_tcp_sk(struct xfrm_state *x) { struct xfrm_encap_tmpl *encap = x->encap; struct net *net = xs_net(x); - struct esp_tcp_sk *esk; __be16 sport, dport; - struct sock *nsk; struct sock *sk; - sk = rcu_dereference(x->encap_sk); - if (sk && sk->sk_state == TCP_ESTABLISHED) - return sk; - spin_lock_bh(&x->lock); sport = encap->encap_sport; dport = encap->encap_dport; - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (sk && sk == nsk) { - esk = kmalloc(sizeof(*esk), GFP_ATOMIC); - if (!esk) { - spin_unlock_bh(&x->lock); - return ERR_PTR(-ENOMEM); - } - RCU_INIT_POINTER(x->encap_sk, NULL); - esk->sk = sk; - call_rcu(&esk->rcu, 
esp_free_tcp_sk); - } spin_unlock_bh(&x->lock); sk = inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, x->id.daddr.a4, @@ -171,20 +140,6 @@ static struct sock *esp_find_tcp_sk(struct xfrm_state *x) return ERR_PTR(-EINVAL); } - spin_lock_bh(&x->lock); - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (encap->encap_sport != sport || - encap->encap_dport != dport) { - sock_put(sk); - sk = nsk ?: ERR_PTR(-EREMCHG); - } else if (sk == nsk) { - sock_put(sk); - } else { - rcu_assign_pointer(x->encap_sk, sk); - } - spin_unlock_bh(&x->lock); - return sk; } @@ -207,6 +162,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) err = espintcp_push_skb(sk, skb); bh_unlock_sock(sk); + sock_put(sk); + out: rcu_read_unlock(); return err; @@ -391,6 +348,8 @@ static struct ip_esp_hdr *esp_output_tcp_encap(struct xfrm_state *x, if (IS_ERR(sk)) return ERR_CAST(sk); + sock_put(sk); + *lenp = htons(len); esph = (struct ip_esp_hdr *)(lenp + 1); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 90ce87ffed46..7993ff46de23 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -829,19 +829,33 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb, } } + if (cfg->fc_dst_len > 32) { + NL_SET_ERR_MSG(extack, "Invalid prefix length"); + err = -EINVAL; + goto errout; + } + + if (cfg->fc_dst_len < 32 && (ntohl(cfg->fc_dst) << cfg->fc_dst_len)) { + NL_SET_ERR_MSG(extack, "Invalid prefix for given prefix length"); + err = -EINVAL; + goto errout; + } + if (cfg->fc_nh_id) { if (cfg->fc_oif || cfg->fc_gw_family || cfg->fc_encap || cfg->fc_mp) { NL_SET_ERR_MSG(extack, "Nexthop specification and nexthop id are mutually exclusive"); - return -EINVAL; + err = -EINVAL; + goto errout; } } if (has_gw && has_via) { NL_SET_ERR_MSG(extack, "Nexthop configuration can not contain both GATEWAY and VIA"); - return -EINVAL; + err = -EINVAL; + goto errout; } if (!cfg->fc_table) diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 513f475c6a53..298a9944a3d1 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -222,9 +222,9 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct nlattr **tb, struct netlink_ext_ack *extack) { - struct net *net = sock_net(skb->sk); + struct fib4_rule *rule4 = (struct fib4_rule *)rule; + struct net *net = rule->fr_net; int err = -EINVAL; - struct fib4_rule *rule4 = (struct fib4_rule *) rule; if (!inet_validate_dscp(frh->tos)) { NL_SET_ERR_MSG(extack, diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 77b97c48da5e..fa54b36b241a 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1192,22 +1192,6 @@ static int fib_insert_alias(struct trie *t, struct key_vector *tp, return 0; } -static bool fib_valid_key_len(u32 key, u8 plen, struct netlink_ext_ack *extack) -{ - if (plen > KEYLENGTH) { - NL_SET_ERR_MSG(extack, "Invalid prefix length"); - return false; - } - - if ((plen < KEYLENGTH) && (key << plen)) { - NL_SET_ERR_MSG(extack, - "Invalid prefix for given prefix length"); - return false; - } - - return true; -} - static void fib_remove_alias(struct trie *t, struct key_vector *tp, struct key_vector *l, struct fib_alias *old); @@ -1228,9 +1212,6 @@ int fib_table_insert(struct net *net, struct fib_table *tb, key = ntohl(cfg->fc_dst); - if (!fib_valid_key_len(key, plen, extack)) - return -EINVAL; - pr_debug("Insert table=%u %08x/%d\n", tb->tb_id, key, plen); fi = fib_create_info(cfg, extack); @@ -1723,9 +1704,6 @@ int 
fib_table_delete(struct net *net, struct fib_table *tb, key = ntohl(cfg->fc_dst); - if (!fib_valid_key_len(key, plen, extack)) - return -EINVAL; - l = fib_find_node(t, &tp, key); if (!l) return -ESRCH; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 9dffdd876fef..94501bb30c43 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -93,6 +93,9 @@ #include #include #include +#include +#define CREATE_TRACE_POINTS +#include /* * Build xmit assembly blocks @@ -316,7 +319,6 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, struct dst_entry *dst = &rt->dst; struct inet_peer *peer; bool rc = true; - int vif; if (!apply_ratelimit) return true; @@ -325,12 +327,12 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, if (dst->dev && (dst->dev->flags&IFF_LOOPBACK)) goto out; - vif = l3mdev_master_ifindex(dst->dev); - peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1); + rcu_read_lock(); + peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, + l3mdev_master_ifindex_rcu(dst->dev)); rc = inet_peer_xrlim_allow(peer, READ_ONCE(net->ipv4.sysctl_icmp_ratelimit)); - if (peer) - inet_putpeer(peer); + rcu_read_unlock(); out: if (!rc) __ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST); @@ -404,10 +406,10 @@ static void icmp_push_reply(struct sock *sk, static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) { - struct ipcm_cookie ipc; struct rtable *rt = skb_rtable(skb); - struct net *net = dev_net(rt->dst.dev); + struct net *net = dev_net_rcu(rt->dst.dev); bool apply_ratelimit = false; + struct ipcm_cookie ipc; struct flowi4 fl4; struct sock *sk; struct inet_sock *inet; @@ -482,13 +484,11 @@ static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb) return route_lookup_dev; } -static struct rtable *icmp_route_lookup(struct net *net, - struct flowi4 *fl4, +static struct rtable *icmp_route_lookup(struct net *net, struct flowi4 *fl4, struct sk_buff *skb_in, - const struct iphdr *iph, - __be32 saddr, u8 tos, u32 mark, - int type, int code, - struct icmp_bxm *param) + const struct iphdr *iph, __be32 saddr, + dscp_t dscp, u32 mark, int type, + int code, struct icmp_bxm *param) { struct net_device *route_lookup_dev; struct rtable *rt, *rt2; @@ -501,7 +501,7 @@ static struct rtable *icmp_route_lookup(struct net *net, fl4->saddr = saddr; fl4->flowi4_mark = mark; fl4->flowi4_uid = sock_net_uid(net, NULL); - fl4->flowi4_tos = RT_TOS(tos); + fl4->flowi4_tos = inet_dscp_to_dsfield(dscp); fl4->flowi4_proto = IPPROTO_ICMP; fl4->fl4_icmp_type = type; fl4->fl4_icmp_code = code; @@ -549,7 +549,7 @@ static struct rtable *icmp_route_lookup(struct net *net, orefdst = skb_in->_skb_refdst; /* save old refdst */ skb_dst_set(skb_in, NULL); err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr, - RT_TOS(tos), rt2->dst.dev); + dscp, rt2->dst.dev); dst_release(&rt2->dst); rt2 = skb_rtable(skb_in); @@ -610,12 +610,14 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, struct sock *sk; if (!rt) - goto out; + return; + + rcu_read_lock(); if (rt->dst.dev) - net = dev_net(rt->dst.dev); + net = dev_net_rcu(rt->dst.dev); else if (skb_in->dev) - net = dev_net(skb_in->dev); + net = dev_net_rcu(skb_in->dev); else goto out; @@ -743,8 +745,9 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, ipc.opt = &icmp_param.replyopts.opt; ipc.sockc.mark = mark; - rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark, - type, code, &icmp_param); + rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, + 
inet_dsfield_to_dscp(tos), mark, type, code, + &icmp_param); if (IS_ERR(rt)) goto out_unlock; @@ -777,6 +780,8 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info, if (!fl4.saddr) fl4.saddr = htonl(INADDR_DUMMY); + trace_icmp_send(skb_in, type, code); + icmp_push_reply(sk, &icmp_param, &fl4, &ipc, &rt); ende: ip_rt_put(rt); @@ -784,7 +789,8 @@ out_unlock: icmp_xmit_unlock(sk); out_bh_enable: local_bh_enable(); -out:; +out: + rcu_read_unlock(); } EXPORT_SYMBOL(__icmp_send); @@ -833,7 +839,7 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info) * avoid additional coding at protocol handlers. */ if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) { - __ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS); + __ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS); return; } @@ -867,7 +873,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb) struct net *net; u32 info = 0; - net = dev_net(skb_dst(skb)->dev); + net = dev_net_rcu(skb_dst(skb)->dev); /* * Incomplete header ? @@ -978,7 +984,7 @@ out_err: static enum skb_drop_reason icmp_redirect(struct sk_buff *skb) { if (skb->len < sizeof(struct iphdr)) { - __ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS); + __ICMP_INC_STATS(dev_net_rcu(skb->dev), ICMP_MIB_INERRORS); return SKB_DROP_REASON_PKT_TOO_SMALL; } @@ -1010,7 +1016,7 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb) struct icmp_bxm icmp_param; struct net *net; - net = dev_net(skb_dst(skb)->dev); + net = dev_net_rcu(skb_dst(skb)->dev); /* should there be an ICMP stat for ignored echos? */ if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all)) return SKB_NOT_DROPPED_YET; @@ -1039,9 +1045,9 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb) bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr) { + struct net *net = dev_net_rcu(skb->dev); struct icmp_ext_hdr *ext_hdr, _ext_hdr; struct icmp_ext_echo_iio *iio, _iio; - struct net *net = dev_net(skb->dev); struct inet6_dev *in6_dev; struct in_device *in_dev; struct net_device *dev; @@ -1180,7 +1186,7 @@ static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb) return SKB_NOT_DROPPED_YET; out_err: - __ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS); + __ICMP_INC_STATS(dev_net_rcu(skb_dst(skb)->dev), ICMP_MIB_INERRORS); return SKB_DROP_REASON_PKT_TOO_SMALL; } @@ -1197,7 +1203,7 @@ int icmp_rcv(struct sk_buff *skb) { enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; struct rtable *rt = skb_rtable(skb); - struct net *net = dev_net(rt->dst.dev); + struct net *net = dev_net_rcu(rt->dst.dev); struct icmphdr *icmph; if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { @@ -1370,9 +1376,9 @@ int icmp_err(struct sk_buff *skb, u32 info) struct iphdr *iph = (struct iphdr *)skb->data; int offset = iph->ihl<<2; struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset); + struct net *net = dev_net_rcu(skb->dev); int type = icmp_hdr(skb)->type; int code = icmp_hdr(skb)->code; - struct net *net = dev_net(skb->dev); /* * Use ping_err to handle all icmp errors except those diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 569186f741fb..8fa56a17f03a 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -1121,7 +1121,7 @@ no_ownership: drop: __inet_csk_reqsk_queue_drop(sk_listener, oreq, true); - reqsk_put(req); + reqsk_put(oreq); } static bool reqsk_queue_hash_req(struct request_sock *req, diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index ada7302af59d..3cb666a26473 
100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -1397,6 +1397,7 @@ int inet_diag_handler_get_info(struct sk_buff *skb, struct sock *sk) } static const struct sock_diag_handler inet_diag_handler = { + .owner = THIS_MODULE, .family = AF_INET, .dump = inet_diag_handler_cmd, .get_info = inet_diag_handler_get_info, @@ -1404,6 +1405,7 @@ static const struct sock_diag_handler inet_diag_handler = { }; static const struct sock_diag_handler inet6_diag_handler = { + .owner = THIS_MODULE, .family = AF_INET6, .dump = inet_diag_handler_cmd, .get_info = inet_diag_handler_get_info, @@ -1443,6 +1445,11 @@ void inet_diag_unregister(const struct inet_diag_handler *h) } EXPORT_SYMBOL_GPL(inet_diag_unregister); +static const struct sock_diag_inet_compat inet_diag_compat = { + .owner = THIS_MODULE, + .fn = inet_diag_rcv_msg_compat, +}; + static int __init inet_diag_init(void) { const int inet_diag_table_size = (IPPROTO_MAX * @@ -1461,7 +1468,7 @@ static int __init inet_diag_init(void) if (err) goto out_free_inet; - sock_diag_register_inet_compat(inet_diag_rcv_msg_compat); + sock_diag_register_inet_compat(&inet_diag_compat); out: return err; @@ -1476,7 +1483,7 @@ static void __exit inet_diag_exit(void) { sock_diag_unregister(&inet6_diag_handler); sock_diag_unregister(&inet_diag_handler); - sock_diag_unregister_inet_compat(inet_diag_rcv_msg_compat); + sock_diag_unregister_inet_compat(&inet_diag_compat); kfree(inet_diag_table); } diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 321f509f2347..5e7cdcebd64f 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -1218,22 +1218,37 @@ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) { unsigned int locksz = sizeof(spinlock_t); unsigned int i, nblocks = 1; + spinlock_t *ptr = NULL; - if (locksz != 0) { - /* allocate 2 cache lines or at least one spinlock per cpu */ - nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U); - nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); + if (locksz == 0) + goto set_mask; - /* no more locks than number of hash buckets */ - nblocks = min(nblocks, hashinfo->ehash_mask + 1); + /* Allocate 2 cache lines or at least one spinlock per cpu. */ + nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U) * num_possible_cpus(); - hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL); - if (!hashinfo->ehash_locks) - return -ENOMEM; + /* At least one page per NUMA node. */ + nblocks = max(nblocks, num_online_nodes() * PAGE_SIZE / locksz); - for (i = 0; i < nblocks; i++) - spin_lock_init(&hashinfo->ehash_locks[i]); + nblocks = roundup_pow_of_two(nblocks); + + /* No more locks than number of hash buckets. */ + nblocks = min(nblocks, hashinfo->ehash_mask + 1); + + if (num_online_nodes() > 1) { + /* Use vmalloc() to allow NUMA policy to spread pages + * on all available nodes if desired. 
+ */ + ptr = vmalloc_array(nblocks, locksz); } + if (!ptr) { + ptr = kvmalloc_array(nblocks, locksz, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + } + for (i = 0; i < nblocks; i++) + spin_lock_init(&ptr[i]); + hashinfo->ehash_locks = ptr; +set_mask: hashinfo->ehash_locks_mask = nblocks - 1; return 0; } diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index e9fed83e9b3c..23896b6b8417 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -98,6 +98,7 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr, { struct rb_node **pp, *parent, *next; struct inet_peer *p; + u32 now; pp = &base->rb_root.rb_node; parent = NULL; @@ -111,8 +112,9 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr, p = rb_entry(parent, struct inet_peer, rb_node); cmp = inetpeer_addr_cmp(daddr, &p->daddr); if (cmp == 0) { - if (!refcount_inc_not_zero(&p->refcnt)) - break; + now = jiffies; + if (READ_ONCE(p->dtime) != now) + WRITE_ONCE(p->dtime, now); return p; } if (gc_stack) { @@ -158,9 +160,6 @@ static void inet_peer_gc(struct inet_peer_base *base, for (i = 0; i < gc_cnt; i++) { p = gc_stack[i]; - /* The READ_ONCE() pairs with the WRITE_ONCE() - * in inet_putpeer() - */ delta = (__u32)jiffies - READ_ONCE(p->dtime); if (delta < ttl || !refcount_dec_if_one(&p->refcnt)) @@ -176,31 +175,23 @@ static void inet_peer_gc(struct inet_peer_base *base, } } +/* Must be called under RCU : No refcount change is done here. */ struct inet_peer *inet_getpeer(struct inet_peer_base *base, - const struct inetpeer_addr *daddr, - int create) + const struct inetpeer_addr *daddr) { struct inet_peer *p, *gc_stack[PEER_MAX_GC]; struct rb_node **pp, *parent; unsigned int gc_cnt, seq; - int invalidated; /* Attempt a lockless lookup first. * Because of a concurrent writer, we might not find an existing entry. */ - rcu_read_lock(); seq = read_seqbegin(&base->lock); p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp); - invalidated = read_seqretry(&base->lock, seq); - rcu_read_unlock(); if (p) return p; - /* If no writer did a change during our lookup, we can return early. */ - if (!create && !invalidated) - return NULL; - /* retry an exact lookup, taking the lock before. * At least, nodes should be hot in our cache. */ @@ -209,12 +200,12 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base, gc_cnt = 0; p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp); - if (!p && create) { + if (!p) { p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC); if (p) { p->daddr = *daddr; p->dtime = (__u32)jiffies; - refcount_set(&p->refcnt, 2); + refcount_set(&p->refcnt, 1); atomic_set(&p->rid, 0); p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; p->rate_tokens = 0; @@ -239,15 +230,9 @@ EXPORT_SYMBOL_GPL(inet_getpeer); void inet_putpeer(struct inet_peer *p) { - /* The WRITE_ONCE() pairs with itself (we run lockless) - * and the READ_ONCE() in inet_peer_gc() - */ - WRITE_ONCE(p->dtime, (__u32)jiffies); - if (refcount_dec_and_test(&p->refcnt)) call_rcu(&p->rcu, inetpeer_free_rcu); } -EXPORT_SYMBOL_GPL(inet_putpeer); /* * Check transmit rate limitation for given message. 
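The inet_ehash_locks_alloc() hunk in the net/ipv4/inet_hashtables.c change above sizes the ehash spinlock array from four inputs: the spinlock size, the number of possible CPUs, the number of online NUMA nodes and the hash mask, then rounds the result up to a power of two so it can be stored as a mask. Below is a minimal userspace sketch of that sizing arithmetic only; it is not part of the patch, and L1_CACHE_BYTES, PAGE_SIZE, the helper names and the CPU/node/bucket numbers in main() are illustrative stand-ins.

#include <stdio.h>

#define L1_CACHE_BYTES 64        /* illustrative cache-line size */
#define PAGE_SIZE      4096      /* illustrative page size */

/* smallest power of two >= x (for x >= 1) */
static unsigned int roundup_pow_of_two(unsigned int x)
{
        unsigned int r = 1;

        while (r < x)
                r <<= 1;
        return r;
}

/* Mirrors the sizing logic of the inet_ehash_locks_alloc() hunk above. */
static unsigned int ehash_lock_count(unsigned int locksz,
                                     unsigned int possible_cpus,
                                     unsigned int online_nodes,
                                     unsigned int ehash_mask)
{
        unsigned int nblocks;

        if (locksz == 0)
                return 1;       /* the hunk skips allocation and keeps a mask of 0 */

        /* two cache lines of spinlocks, at least one, per possible CPU */
        nblocks = 2u * L1_CACHE_BYTES / locksz;
        if (nblocks < 1)
                nblocks = 1;
        nblocks *= possible_cpus;

        /* at least one page worth of locks per online NUMA node */
        if (nblocks < online_nodes * PAGE_SIZE / locksz)
                nblocks = online_nodes * PAGE_SIZE / locksz;

        nblocks = roundup_pow_of_two(nblocks);

        /* never more locks than hash buckets */
        if (nblocks > ehash_mask + 1)
                nblocks = ehash_mask + 1;

        return nblocks;
}

int main(void)
{
        /* e.g. 4-byte spinlock_t, 32 possible CPUs, 2 NUMA nodes, 64K buckets */
        printf("%u locks\n", ehash_lock_count(4, 32, 2, 0xffff));
        return 0;
}

With these stand-in numbers the per-node page floor (2048) dominates the per-CPU term (1024), so the sketch settles on 2048 locks, comfortably below the 65536-bucket cap; keeping the count a power of two is what lets the kernel store nblocks - 1 in ehash_locks_mask and pick a lock with a cheap AND.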
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 6c309c1ec3b0..0ed999fdca2d 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -82,15 +82,20 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb, static void ip4_frag_init(struct inet_frag_queue *q, const void *a) { struct ipq *qp = container_of(q, struct ipq, q); - struct net *net = q->fqdir->net; - const struct frag_v4_compare_key *key = a; + struct net *net = q->fqdir->net; + struct inet_peer *p = NULL; q->key.v4 = *key; qp->ecn = 0; - qp->peer = q->fqdir->max_dist ? - inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) : - NULL; + if (q->fqdir->max_dist) { + rcu_read_lock(); + p = inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif); + if (p && !refcount_inc_not_zero(&p->refcnt)) + p = NULL; + rcu_read_unlock(); + } + qp->peer = p; } static void ip4_frag_free(struct inet_frag_queue *q) diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index a9e22a098872..b4c59708fc09 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -617,7 +617,8 @@ int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev) orefdst = skb->_skb_refdst; skb_dst_set(skb, NULL); - err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev); + err = ip_route_input(skb, nexthop, iph->saddr, ip4h_dscp(iph), + dev); rt2 = skb_rtable(skb); if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { skb_dst_drop(skb); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index a6d460aaee79..543d029102cf 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -980,8 +980,8 @@ static int __ip_append_data(struct sock *sk, unsigned int maxfraglen, fragheaderlen, maxnonfragsize; int csummode = CHECKSUM_NONE; struct rtable *rt = (struct rtable *)cork->dst; + bool paged, hold_tskey, extra_uref = false; unsigned int wmem_alloc_delta = 0; - bool paged, extra_uref = false; u32 tskey = 0; skb = skb_peek_tail(queue); @@ -990,10 +990,6 @@ static int __ip_append_data(struct sock *sk, mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize; paged = !!cork->gso_size; - if (cork->tx_flags & SKBTX_ANY_TSTAMP && - READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) - tskey = atomic_inc_return(&sk->sk_tskey) - 1; - hh_len = LL_RESERVED_SPACE(rt->dst.dev); fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); @@ -1051,6 +1047,11 @@ static int __ip_append_data(struct sock *sk, cork->length += length; + hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP && + READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID; + if (hold_tskey) + tskey = atomic_inc_return(&sk->sk_tskey) - 1; + /* So, what's going on in the loop below? * * We use calculated fragment length to generate chained skb, @@ -1255,6 +1256,8 @@ error: cork->length -= length; IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS); refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); + if (hold_tskey) + atomic_dec(&sk->sk_tskey); return err; } @@ -1577,7 +1580,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk, * so icmphdr does not in skb linear region and can not get icmp_type * by icmp_hdr(skb)->type. 
*/ - if (sk->sk_type == SOCK_RAW && !inet_sk(sk)->hdrincl) + if (sk->sk_type == SOCK_RAW && + !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH)) icmp_type = fl4->fl4_icmp_type; else icmp_type = icmp_hdr(skb)->type; diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 3445e576b05b..73d7372afb43 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -43,6 +43,7 @@ #include #include #include +#include #if IS_ENABLED(CONFIG_IPV6) #include @@ -102,10 +103,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, if (!ip_tunnel_key_match(&t->parms, flags, key)) continue; - if (t->parms.link == link) + if (READ_ONCE(t->parms.link) == link) return t; - else - cand = t; + cand = t; } hlist_for_each_entry_rcu(t, head, hash_node) { @@ -117,9 +117,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, if (!ip_tunnel_key_match(&t->parms, flags, key)) continue; - if (t->parms.link == link) + if (READ_ONCE(t->parms.link) == link) return t; - else if (!cand) + if (!cand) cand = t; } @@ -137,9 +137,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, if (!ip_tunnel_key_match(&t->parms, flags, key)) continue; - if (t->parms.link == link) + if (READ_ONCE(t->parms.link) == link) return t; - else if (!cand) + if (!cand) cand = t; } @@ -150,9 +150,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, !(t->dev->flags & IFF_UP)) continue; - if (t->parms.link == link) + if (READ_ONCE(t->parms.link) == link) return t; - else if (!cand) + if (!cand) cand = t; } @@ -218,10 +218,10 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn, struct ip_tunnel *t = NULL; struct hlist_head *head = ip_bucket(itn, parms); - hlist_for_each_entry_rcu(t, head, hash_node) { + hlist_for_each_entry_rcu(t, head, hash_node, lockdep_rtnl_is_held()) { if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr && - link == t->parms.link && + link == READ_ONCE(t->parms.link) && type == t->dev->type && ip_tunnel_key_match(&t->parms, flags, key)) break; @@ -294,7 +294,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr, iph->saddr, tunnel->parms.o_key, - RT_TOS(iph->tos), dev_net(dev), + iph->tos & INET_DSCP_MASK, tunnel->net, tunnel->parms.link, tunnel->fwmark, 0, 0); rt = ip_route_output_key(tunnel->net, &fl4); @@ -359,6 +359,20 @@ err_dev_set_mtu: return ERR_PTR(err); } +void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info) +{ + const struct iphdr *iph = ip_hdr(skb); + const struct udphdr *udph; + + if (iph->protocol != IPPROTO_UDP) + return; + + udph = (struct udphdr *)((__u8 *)iph + (iph->ihl << 2)); + info->encap.sport = udph->source; + info->encap.dport = udph->dest; +} +EXPORT_SYMBOL(ip_tunnel_md_udp_encap); + int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst, bool log_ecn_error) @@ -596,10 +610,14 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph); } ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, - tunnel_id_to_key32(key->tun_id), RT_TOS(tos), - dev_net(dev), 0, skb->mark, skb_get_hash(skb), - key->flow_flags); - if (tunnel->encap.type != TUNNEL_ENCAP_NONE) + tunnel_id_to_key32(key->tun_id), + tos & INET_DSCP_MASK, tunnel->net, 0, skb->mark, + skb_get_hash(skb), key->flow_flags); + + if (!tunnel_hlen) + tunnel_hlen = ip_encap_hlen(&tun_info->encap); + + if (ip_tunnel_encap(skb, &tun_info->encap, &proto, 
&fl4) < 0) goto tx_error; use_cache = ip_tunnel_dst_cache_usable(skb, tun_info); @@ -755,11 +773,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, } ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr, - tunnel->parms.o_key, RT_TOS(tos), - dev_net(dev), tunnel->parms.link, + tunnel->parms.o_key, tos & INET_DSCP_MASK, + tunnel->net, READ_ONCE(tunnel->parms.link), tunnel->fwmark, skb_get_hash(skb), 0); - if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) + if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0) goto tx_error; if (connected && md) { @@ -876,7 +894,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn, if (t->parms.link != p->link || t->fwmark != fwmark) { int mtu; - t->parms.link = p->link; + WRITE_ONCE(t->parms.link, p->link); t->fwmark = fwmark; mtu = ip_tunnel_bind_dev(dev); if (set_mtu) @@ -1066,9 +1084,9 @@ EXPORT_SYMBOL(ip_tunnel_get_link_net); int ip_tunnel_get_iflink(const struct net_device *dev) { - struct ip_tunnel *tunnel = netdev_priv(dev); + const struct ip_tunnel *tunnel = netdev_priv(dev); - return tunnel->parms.link; + return READ_ONCE(tunnel->parms.link); } EXPORT_SYMBOL(ip_tunnel_get_iflink); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 80ccd6661aa3..deb08cab4464 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -415,7 +415,7 @@ int skb_tunnel_check_pmtu(struct sk_buff *skb, struct dst_entry *encap_dst, skb_dst_update_pmtu_no_confirm(skb, mtu); - if (!reply || skb->pkt_type == PACKET_HOST) + if (!reply) return 0; if (skb->protocol == htons(ETH_P_IP)) @@ -450,7 +450,7 @@ static const struct nla_policy geneve_opt_policy[LWTUNNEL_IP_OPT_GENEVE_MAX + 1] = { [LWTUNNEL_IP_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [LWTUNNEL_IP_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, - [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 }, + [LWTUNNEL_IP_OPT_GENEVE_DATA] = { .type = NLA_BINARY, .len = 127 }, }; static const struct nla_policy diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 180f9daf5bec..1cf35c50cdf4 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -241,6 +241,7 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto) tun_dst = ip_tun_rx_dst(skb, 0, 0, 0); if (!tun_dst) return 0; + ip_tunnel_md_udp_encap(skb, &tun_dst->u.tun_info); } skb_reset_mac_header(skb); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 3ed9ed2bffd2..f64651c0dea6 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -136,7 +136,7 @@ static struct mr_table *ipmr_mr_table_iter(struct net *net, return ret; } -static struct mr_table *ipmr_get_table(struct net *net, u32 id) +static struct mr_table *__ipmr_get_table(struct net *net, u32 id) { struct mr_table *mrt; @@ -147,6 +147,16 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id) return NULL; } +static struct mr_table *ipmr_get_table(struct net *net, u32 id) +{ + struct mr_table *mrt; + + rcu_read_lock(); + mrt = __ipmr_get_table(net, id); + rcu_read_unlock(); + return mrt; +} + static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { @@ -188,7 +198,7 @@ static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, arg->table = fib_rule_get_table(rule, arg); - mrt = ipmr_get_table(rule->fr_net, arg->table); + mrt = __ipmr_get_table(rule->fr_net, arg->table); if (!mrt) return -EAGAIN; res->mrt = mrt; @@ -314,6 +324,8 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id) return net->ipv4.mrt; } +#define __ipmr_get_table ipmr_get_table + static int 
ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { @@ -402,7 +414,7 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) if (id != RT_TABLE_DEFAULT && id >= 1000000000) return ERR_PTR(-EINVAL); - mrt = ipmr_get_table(net, id); + mrt = __ipmr_get_table(net, id); if (mrt) return mrt; @@ -1373,7 +1385,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval, goto out_unlock; } - mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); + mrt = __ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT); if (!mrt) { ret = -ENOENT; goto out_unlock; @@ -2247,11 +2259,13 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb, struct mr_table *mrt; int err; - mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); - if (!mrt) - return -ENOENT; - rcu_read_lock(); + mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT); + if (!mrt) { + rcu_read_unlock(); + return -ENOENT; + } + cache = ipmr_cache_find(mrt, saddr, daddr); if (!cache && skb->dev) { int vif = ipmr_find_vif(mrt, skb->dev); @@ -2536,7 +2550,7 @@ static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0; tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0; - mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT); + mrt = __ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT); if (!mrt) { err = -ENOENT; goto errout_free; @@ -2588,7 +2602,7 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) if (filter.table_id) { struct mr_table *mrt; - mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id); + mrt = __ipmr_get_table(sock_net(skb->sk), filter.table_id); if (!mrt) { if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR) return skb->len; @@ -2696,7 +2710,7 @@ static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh, break; } } - mrt = ipmr_get_table(net, tblid); + mrt = __ipmr_get_table(net, tblid); if (!mrt) { ret = -ENOENT; goto out; @@ -2904,13 +2918,15 @@ static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos) struct net *net = seq_file_net(seq); struct mr_table *mrt; - mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); - if (!mrt) + rcu_read_lock(); + mrt = __ipmr_get_table(net, RT_TABLE_DEFAULT); + if (!mrt) { + rcu_read_unlock(); return ERR_PTR(-ENOENT); + } iter->mrt = mrt; - rcu_read_lock(); return mr_vif_seq_start(seq, pos); } diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c index 271dc03fc6db..de98ce66d38f 100644 --- a/net/ipv4/ipmr_base.c +++ b/net/ipv4/ipmr_base.c @@ -310,7 +310,8 @@ int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb, if (filter->filter_set) flags |= NLM_F_DUMP_FILTERED; - list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) { + list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list, + lockdep_rtnl_is_held()) { if (e < s_e) goto next_entry; if (filter->dev && @@ -329,9 +330,6 @@ next_entry: list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) { if (e < s_e) goto next_entry2; - if (filter->dev && - !mr_mfc_uses_dev(mrt, mfc, filter->dev)) - goto next_entry2; err = fill(mrt, skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index ee0efd0efec4..c109bc376cc5 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -608,6 +608,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) (hdrincl ? 
FLOWI_FLAG_KNOWN_NH : 0), daddr, saddr, 0, 0, sk->sk_uid); + fl4.fl4_icmp_type = 0; + fl4.fl4_icmp_code = 0; + if (!hdrincl) { rfv.msg = msg; rfv.hlen = 0; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index fda88894d020..4574dcba9f19 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -393,7 +393,13 @@ static inline int ip_rt_proc_init(void) static inline bool rt_is_expired(const struct rtable *rth) { - return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev)); + bool res; + + rcu_read_lock(); + res = rth->rt_genid != rt_genid_ipv4(dev_net_rcu(rth->dst.dev)); + rcu_read_unlock(); + + return res; } void rt_cache_flush(struct net *net) @@ -882,11 +888,11 @@ void ip_rt_send_redirect(struct sk_buff *skb) } log_martians = IN_DEV_LOG_MARTIANS(in_dev); vif = l3mdev_master_ifindex_rcu(rt->dst.dev); - rcu_read_unlock(); net = dev_net(rt->dst.dev); - peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1); + peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif); if (!peer) { + rcu_read_unlock(); icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt_nexthop(rt, ip_hdr(skb)->daddr)); return; @@ -905,7 +911,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) */ if (peer->n_redirects >= ip_rt_redirect_number) { peer->rate_last = jiffies; - goto out_put_peer; + goto out_unlock; } /* Check for load limit; set rate_last to the latest sent @@ -926,8 +932,8 @@ void ip_rt_send_redirect(struct sk_buff *skb) &ip_hdr(skb)->saddr, inet_iif(skb), &ip_hdr(skb)->daddr, &gw); } -out_put_peer: - inet_putpeer(peer); +out_unlock: + rcu_read_unlock(); } static int ip_error(struct sk_buff *skb) @@ -987,9 +993,9 @@ static int ip_error(struct sk_buff *skb) break; } + rcu_read_lock(); peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, - l3mdev_master_ifindex(skb->dev), 1); - + l3mdev_master_ifindex_rcu(skb->dev)); send = true; if (peer) { now = jiffies; @@ -1001,8 +1007,9 @@ static int ip_error(struct sk_buff *skb) peer->rate_tokens -= ip_rt_error_cost; else send = false; - inet_putpeer(peer); } + rcu_read_unlock(); + if (send) icmp_send(skb, ICMP_DEST_UNREACH, code, 0); @@ -1013,9 +1020,9 @@ out: kfree_skb_reason(skb, reason); static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) { struct dst_entry *dst = &rt->dst; - struct net *net = dev_net(dst->dev); struct fib_result res; bool lock = false; + struct net *net; u32 old_mtu; if (ip_mtu_locked(dst)) @@ -1025,6 +1032,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) if (old_mtu < mtu) return; + rcu_read_lock(); + net = dev_net_rcu(dst->dev); if (mtu < net->ipv4.ip_rt_min_pmtu) { lock = true; mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu); @@ -1032,17 +1041,29 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) if (rt->rt_pmtu == mtu && !lock && time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2)) - return; + goto out; - rcu_read_lock(); if (fib_lookup(net, fl4, &res, 0) == 0) { struct fib_nh_common *nhc; fib_select_path(net, &res, fl4, NULL); +#ifdef CONFIG_IP_ROUTE_MULTIPATH + if (fib_info_num_path(res.fi) > 1) { + int nhsel; + + for (nhsel = 0; nhsel < fib_info_num_path(res.fi); nhsel++) { + nhc = fib_info_nhc(res.fi, nhsel); + update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock, + jiffies + net->ipv4.ip_rt_mtu_expires); + } + goto out; + } +#endif /* CONFIG_IP_ROUTE_MULTIPATH */ nhc = FIB_RES_NHC(res); update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock, jiffies + net->ipv4.ip_rt_mtu_expires); } +out: rcu_read_unlock(); } @@ 
-1305,10 +1326,15 @@ static void set_class_tag(struct rtable *rt, u32 tag) static unsigned int ipv4_default_advmss(const struct dst_entry *dst) { - struct net *net = dev_net(dst->dev); unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr); - unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size, - net->ipv4.ip_rt_min_advmss); + unsigned int advmss; + struct net *net; + + rcu_read_lock(); + net = dev_net_rcu(dst->dev); + advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size, + net->ipv4.ip_rt_min_advmss); + rcu_read_unlock(); return min(advmss, IPV4_MAX_PMTU - header_size); } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index cb79919323a6..b64d53590f25 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1699,12 +1699,13 @@ EXPORT_SYMBOL(tcp_recv_skb); * or for 'peeking' the socket using this routine * (although both would be easy to implement). */ -int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, - sk_read_actor_t recv_actor) +static int __tcp_read_sock(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor, bool noack, + u32 *copied_seq) { struct sk_buff *skb; struct tcp_sock *tp = tcp_sk(sk); - u32 seq = tp->copied_seq; + u32 seq = *copied_seq; u32 offset; int copied = 0; @@ -1758,9 +1759,12 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, tcp_eat_recv_skb(sk, skb); if (!desc->count) break; - WRITE_ONCE(tp->copied_seq, seq); + WRITE_ONCE(*copied_seq, seq); } - WRITE_ONCE(tp->copied_seq, seq); + WRITE_ONCE(*copied_seq, seq); + + if (noack) + goto out; tcp_rcv_space_adjust(sk); @@ -1769,10 +1773,25 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, tcp_recv_skb(sk, seq, &offset); tcp_cleanup_rbuf(sk, copied); } +out: return copied; } + +int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor) +{ + return __tcp_read_sock(sk, desc, recv_actor, false, + &tcp_sk(sk)->copied_seq); +} EXPORT_SYMBOL(tcp_read_sock); +int tcp_read_sock_noack(struct sock *sk, read_descriptor_t *desc, + sk_read_actor_t recv_actor, bool noack, + u32 *copied_seq) +{ + return __tcp_read_sock(sk, desc, recv_actor, noack, copied_seq); +} + int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor) { struct sk_buff *skb; @@ -3184,7 +3203,7 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); - dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); + dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL))); tcp_saved_syn_free(tp); tp->compressed_ack = 0; tp->segs_in = 0; @@ -4726,6 +4745,12 @@ int tcp_abort(struct sock *sk, int err) /* Don't race with userspace socket closes such as tcp_close. */ lock_sock(sk); + /* Avoid closing the same socket twice. 
*/ + if (sk->sk_state == TCP_CLOSE) { + release_sock(sk); + return -ENOENT; + } + if (sk->sk_state == TCP_LISTEN) { tcp_set_state(sk, TCP_CLOSE); inet_csk_listen_stop(sk); @@ -4735,19 +4760,12 @@ int tcp_abort(struct sock *sk, int err) local_bh_disable(); bh_lock_sock(sk); - if (!sock_flag(sk, SOCK_DEAD)) { - WRITE_ONCE(sk->sk_err, err); - /* This barrier is coupled with smp_rmb() in tcp_poll() */ - smp_wmb(); - sk_error_report(sk); - if (tcp_need_reset(sk->sk_state)) - tcp_send_active_reset(sk, GFP_ATOMIC); - tcp_done(sk); - } + if (tcp_need_reset(sk->sk_state)) + tcp_send_active_reset(sk, GFP_ATOMIC); + tcp_done_with_error(sk, err); bh_unlock_sock(sk); local_bh_enable(); - tcp_write_queue_purge(sk); release_sock(sk); return 0; } diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c index 07a896685d0d..bf10fa3c37b7 100644 --- a/net/ipv4/tcp_bpf.c +++ b/net/ipv4/tcp_bpf.c @@ -49,13 +49,14 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, sge = sk_msg_elem(msg, i); size = (apply && apply_bytes < sge->length) ? apply_bytes : sge->length; - if (!sk_wmem_schedule(sk, size)) { + if (!__sk_rmem_schedule(sk, size, false)) { if (!copied) ret = -ENOMEM; break; } sk_mem_charge(sk, size); + atomic_add(size, &sk->sk_rmem_alloc); sk_msg_xfer(tmp, msg, i, size); copied += size; if (sge->length) @@ -74,7 +75,8 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock, if (!ret) { msg->sg.start = i; - sk_psock_queue_msg(psock, tmp); + if (!sk_psock_queue_msg(psock, tmp)) + atomic_sub(copied, &sk->sk_rmem_alloc); sk_psock_data_ready(sk, psock); } else { sk_msg_free(sk, tmp); @@ -216,11 +218,11 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk, int flags, int *addr_len) { - struct tcp_sock *tcp = tcp_sk(sk); int peek = flags & MSG_PEEK; - u32 seq = tcp->copied_seq; struct sk_psock *psock; + struct tcp_sock *tcp; int copied = 0; + u32 seq; if (unlikely(flags & MSG_ERRQUEUE)) return inet_recv_error(sk, msg, len, addr_len); @@ -233,7 +235,8 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk, return tcp_recvmsg(sk, msg, len, flags, addr_len); lock_sock(sk); - + tcp = tcp_sk(sk); + seq = tcp->copied_seq; /* We may have received data on the sk_receive_queue pre-accept and * then we can not use read_skb in this context because we haven't * assigned a sk_socket yet so have no link to the ops. The work-around @@ -435,7 +438,6 @@ more_data: cork = true; psock->cork = NULL; } - sk_msg_return(sk, msg, tosend); release_sock(sk); origsize = msg->sg.size; @@ -447,8 +449,9 @@ more_data: sock_put(sk_redir); lock_sock(sk); + sk_mem_uncharge(sk, sent); if (unlikely(ret < 0)) { - int free = sk_msg_free_nocharge(sk, msg); + int free = sk_msg_free(sk, msg); if (!cork) *copied -= free; @@ -462,7 +465,7 @@ more_data: break; case __SK_DROP: default: - sk_msg_free_partial(sk, msg, tosend); + sk_msg_free(sk, msg); sk_msg_apply_bytes(psock, tosend); *copied -= (tosend + delta); return -EACCES; @@ -478,11 +481,8 @@ more_data: } if (msg && msg->sg.data[msg->sg.start].page_link && - msg->sg.data[msg->sg.start].length) { - if (eval == __SK_REDIRECT) - sk_mem_charge(sk, tosend - sent); + msg->sg.data[msg->sg.start].length) goto more_data; - } } return ret; } @@ -691,6 +691,42 @@ static int tcp_bpf_assert_proto_ops(struct proto *ops) ops->sendpage == tcp_sendpage ? 
0 : -ENOTSUPP; } +#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) +int tcp_bpf_strp_read_sock(struct strparser *strp, read_descriptor_t *desc, + sk_read_actor_t recv_actor) +{ + struct sock *sk = strp->sk; + struct sk_psock *psock; + struct tcp_sock *tp; + int copied = 0; + + tp = tcp_sk(sk); + rcu_read_lock(); + psock = sk_psock(sk); + if (WARN_ON_ONCE(!psock)) { + desc->error = -EINVAL; + goto out; + } + + psock->ingress_bytes = 0; + copied = tcp_read_sock_noack(sk, desc, recv_actor, true, + &psock->copied_seq); + if (copied < 0) + goto out; + /* recv_actor may redirect skb to another socket (SK_REDIRECT) or + * just put skb into ingress queue of current socket (SK_PASS). + * For SK_REDIRECT, we need to ack the frame immediately but for + * SK_PASS, we want to delay the ack until tcp_bpf_recvmsg_parser(). + */ + tp->copied_seq = psock->copied_seq - psock->ingress_bytes; + tcp_rcv_space_adjust(sk); + __tcp_cleanup_rbuf(sk, copied - psock->ingress_bytes); +out: + rcu_read_unlock(); + return copied; +} +#endif /* CONFIG_BPF_STREAM_PARSER */ + int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore) { int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4; diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index 768c10c1f649..b7c140874d97 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -392,6 +392,10 @@ static void hystart_update(struct sock *sk, u32 delay) if (after(tp->snd_una, ca->end_seq)) bictcp_hystart_reset(sk); + /* hystart triggers when cwnd is larger than some threshold */ + if (tcp_snd_cwnd(tp) < hystart_low_window) + return; + if (hystart_detect & HYSTART_ACK_TRAIN) { u32 now = bictcp_clock_us(sk); @@ -467,9 +471,7 @@ static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample) if (ca->delay_min == 0 || ca->delay_min > delay) ca->delay_min = delay; - /* hystart triggers when cwnd is larger than some threshold */ - if (!ca->found && tcp_in_slow_start(tp) && hystart && - tcp_snd_cwnd(tp) >= hystart_low_window) + if (!ca->found && tcp_in_slow_start(tp) && hystart) hystart_update(sk, delay); } diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 85e4953f1182..cb01c770d8cf 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -49,7 +49,7 @@ void tcp_fastopen_ctx_destroy(struct net *net) { struct tcp_fastopen_context *ctxt; - ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL); + ctxt = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx, NULL)); if (ctxt) call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free); @@ -80,9 +80,10 @@ int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk, if (sk) { q = &inet_csk(sk)->icsk_accept_queue.fastopenq; - octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx); + octx = unrcu_pointer(xchg(&q->ctx, RCU_INITIALIZER(ctx))); } else { - octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx); + octx = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx, + RCU_INITIALIZER(ctx))); } if (octx) @@ -177,7 +178,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb) if (!skb) return; - skb_dst_drop(skb); + tcp_cleanup_skb(skb); /* segs_in has been initialized to 1 in tcp_create_openreq_child(). * Hence, reset segs_in to 0 before calling tcp_segs_in() * to avoid double counting. 
Also, tcp_segs_in() expects @@ -194,7 +195,7 @@ void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb) TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN; tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; - __skb_queue_tail(&sk->sk_receive_queue, skb); + tcp_add_receive_queue(sk, skb); tp->syn_data_acked = 1; /* u64_stats_update_begin(&tp->syncp) not needed here, diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9ac47ccfe120..db1a99df29d5 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -404,6 +404,20 @@ static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr return false; } +static void tcp_count_delivered_ce(struct tcp_sock *tp, u32 ecn_count) +{ + tp->delivered_ce += ecn_count; +} + +/* Updates the delivered and delivered_ce counts */ +static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, + bool ece_ack) +{ + tp->delivered += delivered; + if (ece_ack) + tcp_count_delivered_ce(tp, delivered); +} + /* Buffer size and advertised window tuning. * * 1. Tuning sk->sk_sndbuf, when connection enters established state. @@ -1119,15 +1133,6 @@ void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb) } } -/* Updates the delivered and delivered_ce counts */ -static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, - bool ece_ack) -{ - tp->delivered += delivered; - if (ece_ack) - tp->delivered_ce += delivered; -} - /* This procedure tags the retransmission queue when SACKs arrive. * * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). @@ -3783,12 +3788,23 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag) } } -static inline void tcp_in_ack_event(struct sock *sk, u32 flags) +static void tcp_in_ack_event(struct sock *sk, int flag) { const struct inet_connection_sock *icsk = inet_csk(sk); - if (icsk->icsk_ca_ops->in_ack_event) - icsk->icsk_ca_ops->in_ack_event(sk, flags); + if (icsk->icsk_ca_ops->in_ack_event) { + u32 ack_ev_flags = 0; + + if (flag & FLAG_WIN_UPDATE) + ack_ev_flags |= CA_ACK_WIN_UPDATE; + if (flag & FLAG_SLOWPATH) { + ack_ev_flags |= CA_ACK_SLOWPATH; + if (flag & FLAG_ECE) + ack_ev_flags |= CA_ACK_ECE; + } + + icsk->icsk_ca_ops->in_ack_event(sk, ack_ev_flags); + } } /* Congestion control has updated the cwnd already. 
So if we're in @@ -3905,12 +3921,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_snd_una_update(tp, ack); flag |= FLAG_WIN_UPDATE; - tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE); - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS); } else { - u32 ack_ev_flags = CA_ACK_SLOWPATH; - if (ack_seq != TCP_SKB_CB(skb)->end_seq) flag |= FLAG_DATA; else @@ -3922,19 +3934,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una, &sack_state); - if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { + if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) flag |= FLAG_ECE; - ack_ev_flags |= CA_ACK_ECE; - } if (sack_state.sack_delivered) tcp_count_delivered(tp, sack_state.sack_delivered, flag & FLAG_ECE); - - if (flag & FLAG_WIN_UPDATE) - ack_ev_flags |= CA_ACK_WIN_UPDATE; - - tcp_in_ack_event(sk, ack_ev_flags); } /* This is a deviation from RFC3168 since it states that: @@ -3961,6 +3966,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_rack_update_reo_wnd(sk, &rs); + tcp_in_ack_event(sk, flag); + if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); @@ -3992,6 +3999,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) return 1; no_queue: + tcp_in_ack_event(sk, flag); /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) { tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag, @@ -4836,7 +4844,7 @@ static void tcp_ofo_queue(struct sock *sk) tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN; if (!eaten) - __skb_queue_tail(&sk->sk_receive_queue, skb); + tcp_add_receive_queue(sk, skb); else kfree_skb_partial(skb, fragstolen); @@ -5027,7 +5035,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, skb, fragstolen)) ? 1 : 0; tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq); if (!eaten) { - __skb_queue_tail(&sk->sk_receive_queue, skb); + tcp_add_receive_queue(sk, skb); skb_set_owner_r(skb, sk); } return eaten; @@ -5110,7 +5118,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) __kfree_skb(skb); return; } - skb_dst_drop(skb); + tcp_cleanup_skb(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); reason = SKB_DROP_REASON_NOT_SPECIFIED; @@ -6041,7 +6049,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS); /* Bulk data transfer: receiver */ - skb_dst_drop(skb); + tcp_cleanup_skb(skb); __skb_pull(skb, tcp_header_len); eaten = tcp_queue_rcv(sk, skb, &fragstolen); @@ -7124,6 +7132,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req, req->timeout))) { reqsk_free(req); + dst_release(dst); return 0; } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 984435cb1013..7647f1ec0584 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -832,7 +832,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) sock_net_set(ctl_sk, net); if (sk) { ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? - inet_twsk(sk)->tw_mark : sk->sk_mark; + inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark); ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ? 
inet_twsk(sk)->tw_priority : sk->sk_priority; transmit_time = tcp_transmit_time(sk); @@ -1791,7 +1791,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, */ skb_condense(skb); - skb_dst_drop(skb); + tcp_cleanup_skb(skb); if (unlikely(tcp_checksum_complete(skb))) { bh_unlock_sock(sk); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index c562cb965e74..bc94df0140bf 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -735,12 +735,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, /* In sequence, PAWS is OK. */ - /* TODO: We probably should defer ts_recent change once - * we take ownership of @req. - */ - if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt)) - WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval); - if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) { /* Truncate SYN, it is out of window starting at tcp_rsk(req)->rcv_isn + 1. */ @@ -789,6 +783,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb, if (!child) goto listen_overflow; + if (own_req && tmp_opt.saw_tstamp && + !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt)) + tcp_sk(child)->rx_opt.ts_recent = tmp_opt.rcv_tsval; + if (own_req && rsk_drop_req(req)) { reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req); inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req); diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 72a645bf05c9..ce84073e0b7b 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -12,12 +12,15 @@ #include #include -static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, +static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb, unsigned int seq, unsigned int mss) { + u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP; + u32 ts_seq = skb_shinfo(gso_skb)->tskey; + while (skb) { if (before(ts_seq, seq + mss)) { - skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP; + skb_shinfo(skb)->tx_flags |= flags; skb_shinfo(skb)->tskey = ts_seq; return; } @@ -119,8 +122,8 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, th = tcp_hdr(skb); seq = ntohl(th->seq); - if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP)) - tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); + if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP)) + tcp_gso_tstamp(segs, gso_skb, seq, mss); newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta)); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3f54b76bc281..40568365cdb3 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -827,8 +827,10 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, unsigned int size; if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) { - opts->options |= OPTION_MPTCP; - remaining -= size; + if (remaining >= size) { + opts->options |= OPTION_MPTCP; + remaining -= size; + } } } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 53267566808c..da96252dd8a1 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -939,9 +939,9 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, const int hlen = skb_network_header_len(skb) + sizeof(struct udphdr); - if (hlen + cork->gso_size > cork->fragsize) { + if (hlen + min(datalen, cork->gso_size) > cork->fragsize) { kfree_skb(skb); - return -EINVAL; + return -EMSGSIZE; } if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) { kfree_skb(skb); @@ -1437,12 +1437,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb) } /* fully reclaim rmem/fwd memory 
allocated for skb */ -static void udp_rmem_release(struct sock *sk, int size, int partial, - bool rx_queue_lock_held) +static void udp_rmem_release(struct sock *sk, unsigned int size, + int partial, bool rx_queue_lock_held) { struct udp_sock *up = udp_sk(sk); struct sk_buff_head *sk_queue; - int amt; + unsigned int amt; if (likely(partial)) { up->forward_deficit += size; @@ -1462,10 +1462,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial, if (!rx_queue_lock_held) spin_lock(&sk_queue->lock); - - sk_forward_alloc_add(sk, size); - amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1); - sk_forward_alloc_add(sk, -amt); + amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1); + sk_forward_alloc_add(sk, size - amt); if (amt) __sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT); @@ -1650,7 +1648,7 @@ EXPORT_SYMBOL_GPL(skb_consume_udp); static struct sk_buff *__first_packet_length(struct sock *sk, struct sk_buff_head *rcvq, - int *total) + unsigned int *total) { struct sk_buff *skb; @@ -1683,8 +1681,8 @@ static int first_packet_length(struct sock *sk) { struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue; struct sk_buff_head *sk_queue = &sk->sk_receive_queue; + unsigned int total = 0; struct sk_buff *skb; - int total = 0; int res; spin_lock_bh(&rcvq->lock); @@ -2252,7 +2250,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old; if (dst_hold_safe(dst)) { - old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst); + old = unrcu_pointer(xchg(&sk->sk_rx_dst, RCU_INITIALIZER(dst))); dst_release(old); return old != dst; } diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 1456c8c2b8db..d415b4fb2f1f 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -246,6 +246,62 @@ static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs) return segs; } +static void __udpv6_gso_segment_csum(struct sk_buff *seg, + struct in6_addr *oldip, + const struct in6_addr *newip, + __be16 *oldport, __be16 newport) +{ + struct udphdr *uh = udp_hdr(seg); + + if (ipv6_addr_equal(oldip, newip) && *oldport == newport) + return; + + if (uh->check) { + inet_proto_csum_replace16(&uh->check, seg, oldip->s6_addr32, + newip->s6_addr32, true); + + inet_proto_csum_replace2(&uh->check, seg, *oldport, newport, + false); + if (!uh->check) + uh->check = CSUM_MANGLED_0; + } + + *oldip = *newip; + *oldport = newport; +} + +static struct sk_buff *__udpv6_gso_segment_list_csum(struct sk_buff *segs) +{ + const struct ipv6hdr *iph; + const struct udphdr *uh; + struct ipv6hdr *iph2; + struct sk_buff *seg; + struct udphdr *uh2; + + seg = segs; + uh = udp_hdr(seg); + iph = ipv6_hdr(seg); + uh2 = udp_hdr(seg->next); + iph2 = ipv6_hdr(seg->next); + + if (!(*(const u32 *)&uh->source ^ *(const u32 *)&uh2->source) && + ipv6_addr_equal(&iph->saddr, &iph2->saddr) && + ipv6_addr_equal(&iph->daddr, &iph2->daddr)) + return segs; + + while ((seg = seg->next)) { + uh2 = udp_hdr(seg); + iph2 = ipv6_hdr(seg); + + __udpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr, + &uh2->source, uh->source); + __udpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr, + &uh2->dest, uh->dest); + } + + return segs; +} + static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb, netdev_features_t features, bool is_ipv6) @@ -258,7 +314,10 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb, udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss); - return is_ipv6 ? 
skb : __udpv4_gso_segment_list_csum(skb); + if (is_ipv6) + return __udpv6_gso_segment_list_csum(skb); + else + return __udpv4_gso_segment_list_csum(skb); } struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, @@ -314,13 +373,17 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, /* clear destructor to avoid skb_segment assigning it to tail */ copy_dtor = gso_skb->destructor == sock_wfree; - if (copy_dtor) + if (copy_dtor) { gso_skb->destructor = NULL; + gso_skb->sk = NULL; + } segs = skb_segment(gso_skb, features); if (IS_ERR_OR_NULL(segs)) { - if (copy_dtor) + if (copy_dtor) { gso_skb->destructor = sock_wfree; + gso_skb->sk = sk; + } return segs; } diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 4e1e6ef72464..40ee2d6ef229 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2527,6 +2527,24 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev) return idev; } +static void delete_tempaddrs(struct inet6_dev *idev, + struct inet6_ifaddr *ifp) +{ + struct inet6_ifaddr *ift, *tmp; + + write_lock_bh(&idev->lock); + list_for_each_entry_safe(ift, tmp, &idev->tempaddr_list, tmp_list) { + if (ift->ifpub != ifp) + continue; + + in6_ifa_hold(ift); + write_unlock_bh(&idev->lock); + ipv6_del_addr(ift); + write_lock_bh(&idev->lock); + } + write_unlock_bh(&idev->lock); +} + static void manage_tempaddrs(struct inet6_dev *idev, struct inet6_ifaddr *ifp, __u32 valid_lft, __u32 prefered_lft, @@ -3051,11 +3069,12 @@ static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags, in6_ifa_hold(ifp); read_unlock_bh(&idev->lock); - if (!(ifp->flags & IFA_F_TEMPORARY) && - (ifa_flags & IFA_F_MANAGETEMPADDR)) - manage_tempaddrs(idev, ifp, 0, 0, false, - jiffies); ipv6_del_addr(ifp); + + if (!(ifp->flags & IFA_F_TEMPORARY) && + (ifp->flags & IFA_F_MANAGETEMPADDR)) + delete_tempaddrs(idev, ifp); + addrconf_verify_rtnl(net); if (ipv6_addr_is_multicast(pfx)) { ipv6_mc_config(net->ipv6.mc_autojoin_sk, @@ -3143,16 +3162,13 @@ static void add_v4_addrs(struct inet6_dev *idev) struct in6_addr addr; struct net_device *dev; struct net *net = dev_net(idev->dev); - int scope, plen, offset = 0; + int scope, plen; u32 pflags = 0; ASSERT_RTNL(); memset(&addr, 0, sizeof(struct in6_addr)); - /* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */ - if (idev->dev->addr_len == sizeof(struct in6_addr)) - offset = sizeof(struct in6_addr) - 4; - memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4); + memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4); if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) { scope = IPV6_ADDR_COMPATv4; @@ -3462,7 +3478,13 @@ static void addrconf_gre_config(struct net_device *dev) return; } - if (dev->type == ARPHRD_ETHER) { + /* Generate the IPv6 link-local address using addrconf_addr_gen(), + * unless we have an IPv4 GRE device not bound to an IP address and + * which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this + * case). Such devices fall back to add_v4_addrs() instead. 
+ */ + if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 && + idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) { addrconf_addr_gen(idev, true); return; } @@ -4863,14 +4885,12 @@ static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp, } if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) { - if (was_managetempaddr && - !(ifp->flags & IFA_F_MANAGETEMPADDR)) { - cfg->valid_lft = 0; - cfg->preferred_lft = 0; - } - manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft, - cfg->preferred_lft, !was_managetempaddr, - jiffies); + if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR)) + delete_tempaddrs(ifp->idev, ifp); + else + manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft, + cfg->preferred_lft, !was_managetempaddr, + jiffies); } addrconf_verify_rtnl(net); @@ -5701,6 +5721,27 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, } } +static int inet6_fill_ifla6_stats_attrs(struct sk_buff *skb, + struct inet6_dev *idev) +{ + struct nlattr *nla; + + nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64)); + if (!nla) + goto nla_put_failure; + snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla)); + + nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64)); + if (!nla) + goto nla_put_failure; + snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla)); + + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev, u32 ext_filter_mask) { @@ -5722,18 +5763,10 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev, /* XXX - MC not implemented */ - if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS) - return 0; - - nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64)); - if (!nla) - goto nla_put_failure; - snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla)); - - nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64)); - if (!nla) - goto nla_put_failure; - snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla)); + if (!(ext_filter_mask & RTEXT_FILTER_SKIP_STATS)) { + if (inet6_fill_ifla6_stats_attrs(skb, idev) < 0) + goto nla_put_failure; + } nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr)); if (!nla) diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index bed9e94868b2..ec4526911e3a 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -260,31 +260,29 @@ lookup_protocol: */ inet->inet_sport = htons(inet->inet_num); err = sk->sk_prot->hash(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } if (sk->sk_prot->init) { err = sk->sk_prot->init(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } if (!kern) { err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk); - if (err) { - sk_common_release(sk); - goto out; - } + if (err) + goto out_sk_release; } out: return err; out_rcu_unlock: rcu_read_unlock(); goto out; +out_sk_release: + sk_common_release(sk); + sock->sk = NULL; + goto out; } static int __inet6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len, @@ -513,7 +511,7 @@ void inet6_destroy_sock(struct sock *sk) /* Free tx options */ - opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL); + opt = unrcu_pointer(xchg(&np->opt, NULL)); if (opt) { atomic_sub(opt->tot_len, &sk->sk_omem_alloc); txopt_put(opt); diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index 
1578ed9e97d8..c07e3da08d2a 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c @@ -1075,8 +1075,13 @@ static int calipso_sock_getattr(struct sock *sk, struct ipv6_opt_hdr *hop; int opt_len, len, ret_val = -ENOMSG, offset; unsigned char *opt; - struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + if (!pinfo) + return -EAFNOSUPPORT; + + txopts = txopt_get(pinfo); if (!txopts || !txopts->hopopt) goto done; @@ -1128,8 +1133,13 @@ static int calipso_sock_setattr(struct sock *sk, { int ret_val; struct ipv6_opt_hdr *old, *new; - struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + if (!pinfo) + return -EAFNOSUPPORT; + + txopts = txopt_get(pinfo); old = NULL; if (txopts) old = txopts->hopopt; @@ -1156,8 +1166,13 @@ static int calipso_sock_setattr(struct sock *sk, static void calipso_sock_delattr(struct sock *sk) { struct ipv6_opt_hdr *new_hop; - struct ipv6_txoptions *txopts = txopt_get(inet6_sk(sk)); + struct ipv6_pinfo *pinfo = inet6_sk(sk); + struct ipv6_txoptions *txopts; + if (!pinfo) + return; + + txopts = txopt_get(pinfo); if (!txopts || !txopts->hopopt) goto done; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index a021c88d3d9b..085a83b807af 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -135,47 +135,16 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp) } #ifdef CONFIG_INET6_ESPINTCP -struct esp_tcp_sk { - struct sock *sk; - struct rcu_head rcu; -}; - -static void esp_free_tcp_sk(struct rcu_head *head) -{ - struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu); - - sock_put(esk->sk); - kfree(esk); -} - static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) { struct xfrm_encap_tmpl *encap = x->encap; struct net *net = xs_net(x); - struct esp_tcp_sk *esk; __be16 sport, dport; - struct sock *nsk; struct sock *sk; - sk = rcu_dereference(x->encap_sk); - if (sk && sk->sk_state == TCP_ESTABLISHED) - return sk; - spin_lock_bh(&x->lock); sport = encap->encap_sport; dport = encap->encap_dport; - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (sk && sk == nsk) { - esk = kmalloc(sizeof(*esk), GFP_ATOMIC); - if (!esk) { - spin_unlock_bh(&x->lock); - return ERR_PTR(-ENOMEM); - } - RCU_INIT_POINTER(x->encap_sk, NULL); - esk->sk = sk; - call_rcu(&esk->rcu, esp_free_tcp_sk); - } spin_unlock_bh(&x->lock); sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo, &x->id.daddr.in6, @@ -188,20 +157,6 @@ static struct sock *esp6_find_tcp_sk(struct xfrm_state *x) return ERR_PTR(-EINVAL); } - spin_lock_bh(&x->lock); - nsk = rcu_dereference_protected(x->encap_sk, - lockdep_is_held(&x->lock)); - if (encap->encap_sport != sport || - encap->encap_dport != dport) { - sock_put(sk); - sk = nsk ?: ERR_PTR(-EREMCHG); - } else if (sk == nsk) { - sock_put(sk); - } else { - rcu_assign_pointer(x->encap_sk, sk); - } - spin_unlock_bh(&x->lock); - return sk; } @@ -224,6 +179,8 @@ static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb) err = espintcp_push_skb(sk, skb); bh_unlock_sock(sk); + sock_put(sk); + out: rcu_read_unlock(); return err; @@ -427,6 +384,8 @@ static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x, if (IS_ERR(sk)) return ERR_CAST(sk); + sock_put(sk); + *lenp = htons(len); esph = (struct ip_esp_hdr *)(lenp + 1); diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c index 6eeab21512ba..e0f0c5f8cccd 100644 --- 
a/net/ipv6/fib6_rules.c +++ b/net/ipv6/fib6_rules.c @@ -350,9 +350,9 @@ static int fib6_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct nlattr **tb, struct netlink_ext_ack *extack) { + struct fib6_rule *rule6 = (struct fib6_rule *)rule; + struct net *net = rule->fr_net; int err = -EINVAL; - struct net *net = sock_net(skb->sk); - struct fib6_rule *rule6 = (struct fib6_rule *) rule; if (!inet_validate_dscp(frh->tos)) { NL_SET_ERR_MSG(extack, diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index ed8cdf7b8b11..7d88fd314c39 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -222,10 +222,10 @@ static bool icmpv6_xrlim_allow(struct sock *sk, u8 type, if (rt->rt6i_dst.plen < 128) tmo >>= ((128 - rt->rt6i_dst.plen)>>5); - peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr, 1); + rcu_read_lock(); + peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr); res = inet_peer_xrlim_allow(peer, tmo); - if (peer) - inet_putpeer(peer); + rcu_read_unlock(); } if (!res) __ICMP6_INC_STATS(net, ip6_dst_idev(dst), diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c index 9d37f7164e73..7397f764c66c 100644 --- a/net/ipv6/ila/ila_lwt.c +++ b/net/ipv6/ila/ila_lwt.c @@ -88,13 +88,15 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb) goto drop; } - if (ilwt->connected) { + /* cache only if we don't create a dst reference loop */ + if (ilwt->connected && orig_dst->lwtstate != dst->lwtstate) { local_bh_disable(); dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr); local_bh_enable(); } } + skb_dst_drop(skb); skb_dst_set(skb, dst); return dst_output(net, sk, skb); diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c index 2e7a36a1ea0a..8483116dfa23 100644 --- a/net/ipv6/ila/ila_xlat.c +++ b/net/ipv6/ila/ila_xlat.c @@ -201,6 +201,8 @@ static const struct nf_hook_ops ila_nf_hook_ops[] = { }, }; +static DEFINE_MUTEX(ila_mutex); + static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) { struct ila_net *ilan = net_generic(net, ila_net_id); @@ -208,16 +210,20 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp) spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match); int err = 0, order; - if (!ilan->xlat.hooks_registered) { + if (!READ_ONCE(ilan->xlat.hooks_registered)) { /* We defer registering net hooks in the namespace until the * first mapping is added. 
*/ - err = nf_register_net_hooks(net, ila_nf_hook_ops, - ARRAY_SIZE(ila_nf_hook_ops)); + mutex_lock(&ila_mutex); + if (!ilan->xlat.hooks_registered) { + err = nf_register_net_hooks(net, ila_nf_hook_ops, + ARRAY_SIZE(ila_nf_hook_ops)); + if (!err) + WRITE_ONCE(ilan->xlat.hooks_registered, true); + } + mutex_unlock(&ila_mutex); if (err) return err; - - ilan->xlat.hooks_registered = true; } ila = kzalloc(sizeof(*ila), GFP_KERNEL); diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 1123594ad2be..b6a7cbd6bee0 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -983,7 +983,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh, if (pcpu_rt && rcu_access_pointer(pcpu_rt->from) == match) { struct fib6_info *from; - from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL); + from = unrcu_pointer(xchg(&pcpu_rt->from, NULL)); fib6_info_release(from); } } @@ -1180,8 +1180,8 @@ next_iter: while (sibling) { if (sibling->fib6_metric == rt->fib6_metric && rt6_qualify_for_ecmp(sibling)) { - list_add_tail(&rt->fib6_siblings, - &sibling->fib6_siblings); + list_add_tail_rcu(&rt->fib6_siblings, + &sibling->fib6_siblings); break; } sibling = rcu_dereference_protected(sibling->fib6_next, @@ -1242,7 +1242,7 @@ add: fib6_siblings) sibling->fib6_nsiblings--; rt->fib6_nsiblings = 0; - list_del_init(&rt->fib6_siblings); + list_del_rcu(&rt->fib6_siblings); rt6_multipath_rebalance(next_sibling); return err; } @@ -1955,7 +1955,7 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, &rt->fib6_siblings, fib6_siblings) sibling->fib6_nsiblings--; rt->fib6_nsiblings = 0; - list_del_init(&rt->fib6_siblings); + list_del_rcu(&rt->fib6_siblings); rt6_multipath_rebalance(next_sibling); } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index f2227e662d1c..cfc276e5a249 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -610,15 +610,15 @@ int ip6_forward(struct sk_buff *skb) else target = &hdr->daddr; - peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1); + rcu_read_lock(); + peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr); /* Limit redirects both by destination (here) and by source (inside ndisc_send_redirect) */ if (inet_peer_xrlim_allow(peer, 1*HZ)) ndisc_send_redirect(skb, target); - if (peer) - inet_putpeer(peer); + rcu_read_unlock(); } else { int addrtype = ipv6_addr_type(&hdr->saddr); @@ -1450,6 +1450,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, } v6_cork->hop_limit = ipc6->hlimit; v6_cork->tclass = ipc6->tclass; + v6_cork->dontfrag = ipc6->dontfrag; if (rt->dst.flags & DST_XFRM_TUNNEL) mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ? 
READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst); @@ -1483,7 +1484,7 @@ static int __ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, size_t length, int transhdrlen, - unsigned int flags, struct ipcm6_cookie *ipc6) + unsigned int flags) { struct sk_buff *skb, *skb_prev = NULL; struct inet_cork *cork = &cork_full->base; @@ -1499,11 +1500,11 @@ static int __ip6_append_data(struct sock *sk, bool zc = false; u32 tskey = 0; struct rt6_info *rt = (struct rt6_info *)cork->dst; + bool paged, hold_tskey, extra_uref = false; struct ipv6_txoptions *opt = v6_cork->opt; int csummode = CHECKSUM_NONE; unsigned int maxnonfragsize, headersize; unsigned int wmem_alloc_delta = 0; - bool paged, extra_uref = false; skb = skb_peek_tail(queue); if (!skb) { @@ -1515,10 +1516,6 @@ static int __ip6_append_data(struct sock *sk, mtu = cork->gso_size ? IP6_MAX_MTU : cork->fragsize; orig_mtu = mtu; - if (cork->tx_flags & SKBTX_ANY_TSTAMP && - READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) - tskey = atomic_inc_return(&sk->sk_tskey) - 1; - hh_len = LL_RESERVED_SPACE(rt->dst.dev); fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + @@ -1543,7 +1540,7 @@ static int __ip6_append_data(struct sock *sk, if (headersize + transhdrlen > mtu) goto emsgsize; - if (cork->length + length > mtu - headersize && ipc6->dontfrag && + if (cork->length + length > mtu - headersize && v6_cork->dontfrag && (sk->sk_protocol == IPPROTO_UDP || sk->sk_protocol == IPPROTO_ICMPV6 || sk->sk_protocol == IPPROTO_RAW)) { @@ -1606,6 +1603,11 @@ emsgsize: } } + hold_tskey = cork->tx_flags & SKBTX_ANY_TSTAMP && + READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID; + if (hold_tskey) + tskey = atomic_inc_return(&sk->sk_tskey) - 1; + /* * Let's try using as much space as possible. * Use MTU if total length of the message fits into the MTU. 
@@ -1844,6 +1846,8 @@ error: cork->length -= length; IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); + if (hold_tskey) + atomic_dec(&sk->sk_tskey); return err; } @@ -1881,7 +1885,7 @@ int ip6_append_data(struct sock *sk, return __ip6_append_data(sk, &sk->sk_write_queue, &inet->cork, &np->cork, sk_page_frag(sk), getfrag, - from, length, transhdrlen, flags, ipc6); + from, length, transhdrlen, flags); } EXPORT_SYMBOL_GPL(ip6_append_data); @@ -1982,7 +1986,8 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); u8 icmp6_type; - if (sk->sk_socket->type == SOCK_RAW && !inet_sk(sk)->hdrincl) + if (sk->sk_socket->type == SOCK_RAW && + !(fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH)) icmp6_type = fl6->fl6_icmp_type; else icmp6_type = icmp6_hdr(skb)->icmp6_type; @@ -2085,7 +2090,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, err = __ip6_append_data(sk, &queue, cork, &v6_cork, ¤t->task_frag, getfrag, from, length + exthdrlen, transhdrlen + exthdrlen, - flags, ipc6); + flags); if (err) { __ip6_flush_pending_frames(sk, &queue, cork, &v6_cork); return ERR_PTR(err); diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index f3324f2a4046..a82d382193e4 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -628,8 +628,8 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, } skb_dst_set(skb2, &rt->dst); } else { - if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos, - skb2->dev) || + if (ip_route_input(skb2, eiph->daddr, eiph->saddr, + ip4h_dscp(eiph), skb2->dev) || skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6) goto out; } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 27fb5479988a..138f6aee70af 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -125,7 +125,7 @@ static struct mr_table *ip6mr_mr_table_iter(struct net *net, return ret; } -static struct mr_table *ip6mr_get_table(struct net *net, u32 id) +static struct mr_table *__ip6mr_get_table(struct net *net, u32 id) { struct mr_table *mrt; @@ -136,6 +136,16 @@ static struct mr_table *ip6mr_get_table(struct net *net, u32 id) return NULL; } +static struct mr_table *ip6mr_get_table(struct net *net, u32 id) +{ + struct mr_table *mrt; + + rcu_read_lock(); + mrt = __ip6mr_get_table(net, id); + rcu_read_unlock(); + return mrt; +} + static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6, struct mr_table **mrt) { @@ -177,7 +187,7 @@ static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp, arg->table = fib_rule_get_table(rule, arg); - mrt = ip6mr_get_table(rule->fr_net, arg->table); + mrt = __ip6mr_get_table(rule->fr_net, arg->table); if (!mrt) return -EAGAIN; res->mrt = mrt; @@ -304,6 +314,8 @@ static struct mr_table *ip6mr_get_table(struct net *net, u32 id) return net->ipv6.mrt6; } +#define __ip6mr_get_table ip6mr_get_table + static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6, struct mr_table **mrt) { @@ -382,7 +394,7 @@ static struct mr_table *ip6mr_new_table(struct net *net, u32 id) { struct mr_table *mrt; - mrt = ip6mr_get_table(net, id); + mrt = __ip6mr_get_table(net, id); if (mrt) return mrt; @@ -411,13 +423,15 @@ static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos) struct net *net = seq_file_net(seq); struct mr_table *mrt; - mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); - if (!mrt) + rcu_read_lock(); + mrt = __ip6mr_get_table(net, RT6_TABLE_DFLT); + if (!mrt) { + rcu_read_unlock(); return ERR_PTR(-ENOENT); + } iter->mrt = mrt; - 
rcu_read_lock(); return mr_vif_seq_start(seq, pos); } @@ -2288,11 +2302,13 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, struct mfc6_cache *cache; struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); - mrt = ip6mr_get_table(net, RT6_TABLE_DFLT); - if (!mrt) - return -ENOENT; - rcu_read_lock(); + mrt = __ip6mr_get_table(net, RT6_TABLE_DFLT); + if (!mrt) { + rcu_read_unlock(); + return -ENOENT; + } + cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); if (!cache && skb->dev) { int vif = ip6mr_find_vif(mrt, skb->dev); @@ -2573,7 +2589,7 @@ static int ip6mr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, grp = nla_get_in6_addr(tb[RTA_DST]); tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0; - mrt = ip6mr_get_table(net, tableid ?: RT_TABLE_DEFAULT); + mrt = __ip6mr_get_table(net, tableid ?: RT_TABLE_DEFAULT); if (!mrt) { NL_SET_ERR_MSG_MOD(extack, "MR table does not exist"); return -ENOENT; @@ -2618,7 +2634,7 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb) if (filter.table_id) { struct mr_table *mrt; - mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id); + mrt = __ip6mr_get_table(sock_net(skb->sk), filter.table_id); if (!mrt) { if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR) return skb->len; diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 532f4478c884..2924aa45adbc 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -111,8 +111,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); } } - opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt, - opt); + opt = unrcu_pointer(xchg(&inet6_sk(sk)->opt, RCU_INITIALIZER(opt))); sk_dst_reset(sk); return opt; diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index a77769538940..a1b3f3e7921f 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1731,21 +1731,19 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) struct net_device *dev = idev->dev; int hlen = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; - struct net *net = dev_net(dev); const struct in6_addr *saddr; struct in6_addr addr_buf; struct mld2_report *pmr; struct sk_buff *skb; unsigned int size; struct sock *sk; - int err; + struct net *net; - sk = net->ipv6.igmp_sk; /* we assume size > sizeof(ra) here * Also try to not allocate high-order pages for big MTU */ size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen; - skb = sock_alloc_send_skb(sk, size, 1, &err); + skb = alloc_skb(size, GFP_KERNEL); if (!skb) return NULL; @@ -1753,6 +1751,12 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) skb_reserve(skb, hlen); skb_tailroom_reserve(skb, mtu, tlen); + rcu_read_lock(); + + net = dev_net_rcu(dev); + sk = net->ipv6.igmp_sk; + skb_set_owner_w(skb, sk); + if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { /* : * use unspecified address as the source address @@ -1764,6 +1768,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0); + rcu_read_unlock(); + skb_put_data(skb, ra, sizeof(ra)); skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data); diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index cfb4cf6e6654..1a6408a24d21 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -418,15 +418,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev, { int hlen = LL_RESERVED_SPACE(dev); int tlen = 
dev->needed_tailroom; - struct sock *sk = dev_net(dev)->ipv6.ndisc_sk; struct sk_buff *skb; skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC); - if (!skb) { - ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n", - __func__); + if (!skb) return NULL; - } skb->protocol = htons(ETH_P_IPV6); skb->dev = dev; @@ -437,7 +433,9 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev, /* Manually assign socket ownership as we avoid calling * sock_alloc_send_pskb() to bypass wmem buffer limits */ - skb_set_owner_w(skb, sk); + rcu_read_lock(); + skb_set_owner_w(skb, dev_net_rcu(dev)->ipv6.ndisc_sk); + rcu_read_unlock(); return skb; } @@ -473,16 +471,20 @@ static void ip6_nd_hdr(struct sk_buff *skb, void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr, const struct in6_addr *saddr) { - struct dst_entry *dst = skb_dst(skb); - struct net *net = dev_net(skb->dev); - struct sock *sk = net->ipv6.ndisc_sk; - struct inet6_dev *idev; - int err; struct icmp6hdr *icmp6h = icmp6_hdr(skb); + struct dst_entry *dst = skb_dst(skb); + struct inet6_dev *idev; + struct net *net; + struct sock *sk; + int err; u8 type; type = icmp6h->icmp6_type; + rcu_read_lock(); + + net = dev_net_rcu(skb->dev); + sk = net->ipv6.ndisc_sk; if (!dst) { struct flowi6 fl6; int oif = skb->dev->ifindex; @@ -490,6 +492,7 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr, icmpv6_flow_init(sk, &fl6, type, saddr, daddr, oif); dst = icmp6_dst_alloc(skb->dev, &fl6); if (IS_ERR(dst)) { + rcu_read_unlock(); kfree_skb(skb); return; } @@ -504,7 +507,6 @@ void ndisc_send_skb(struct sk_buff *skb, const struct in6_addr *daddr, ip6_nd_hdr(skb, saddr, daddr, inet6_sk(sk)->hop_limit, skb->len); - rcu_read_lock(); idev = __in6_dev_get(dst->dev); IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); @@ -1684,7 +1686,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) bool ret; if (netif_is_l3_master(skb->dev)) { - dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif); + dev = dev_get_by_index_rcu(dev_net(skb->dev), IPCB(skb)->iif); if (!dev) return; } @@ -1721,10 +1723,12 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target) "Redirect: destination is not a neighbour\n"); goto release; } - peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr, 1); + + rcu_read_lock(); + peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr); ret = inet_peer_xrlim_allow(peer, 1*HZ); - if (peer) - inet_putpeer(peer); + rcu_read_unlock(); + if (!ret) goto release; diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c index a7690ec62325..9ea5ef56cb27 100644 --- a/net/ipv6/netfilter/nf_socket_ipv6.c +++ b/net/ipv6/netfilter/nf_socket_ipv6.c @@ -103,6 +103,10 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, struct sk_buff *data_skb = NULL; int doff = 0; int thoff = 0, tproto; +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + enum ip_conntrack_info ctinfo; + struct nf_conn const *ct; +#endif tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL); if (tproto < 0) { @@ -136,6 +140,25 @@ struct sock *nf_sk_lookup_slow_v6(struct net *net, const struct sk_buff *skb, return NULL; } +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + /* Do the lookup with the original socket address in + * case this is a reply packet of an established + * SNAT-ted connection. 
+ */ + ct = nf_ct_get(skb, &ctinfo); + if (ct && + ((tproto != IPPROTO_ICMPV6 && + ctinfo == IP_CT_ESTABLISHED_REPLY) || + (tproto == IPPROTO_ICMPV6 && + ctinfo == IP_CT_RELATED_REPLY)) && + (ct->status & IPS_SRC_NAT_DONE)) { + daddr = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.in6; + dport = (tproto == IPPROTO_TCP) ? + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.tcp.port : + ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u.udp.port; + } +#endif + return nf_socket_get_sock_v6(net, data_skb, doff, tproto, saddr, daddr, sport, dport, indev); } diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 6a227edf8a8a..4e6b833dc40b 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -369,7 +369,7 @@ static void ip6_dst_destroy(struct dst_entry *dst) in6_dev_put(idev); } - from = xchg((__force struct fib6_info **)&rt->from, NULL); + from = unrcu_pointer(xchg(&rt->from, NULL)); fib6_info_release(from); } @@ -378,6 +378,7 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, { struct rt6_info *rt = (struct rt6_info *)dst; struct inet6_dev *idev = rt->rt6i_idev; + struct fib6_info *from; if (idev && idev->dev != blackhole_netdev) { struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev); @@ -387,6 +388,8 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, in6_dev_put(idev); } } + from = unrcu_pointer(xchg(&rt->from, NULL)); + fib6_info_release(from); } static bool __rt6_check_expired(const struct rt6_info *rt) @@ -413,12 +416,37 @@ static bool rt6_check_expired(const struct rt6_info *rt) return false; } +static struct fib6_info * +rt6_multipath_first_sibling_rcu(const struct fib6_info *rt) +{ + struct fib6_info *iter; + struct fib6_node *fn; + + fn = rcu_dereference(rt->fib6_node); + if (!fn) + goto out; + iter = rcu_dereference(fn->leaf); + if (!iter) + goto out; + + while (iter) { + if (iter->fib6_metric == rt->fib6_metric && + rt6_qualify_for_ecmp(iter)) + return iter; + iter = rcu_dereference(iter->fib6_next); + } + +out: + return NULL; +} + void fib6_select_path(const struct net *net, struct fib6_result *res, struct flowi6 *fl6, int oif, bool have_oif_match, const struct sk_buff *skb, int strict) { - struct fib6_info *sibling, *next_sibling; - struct fib6_info *match = res->f6i; + struct fib6_info *first, *match = res->f6i; + struct fib6_info *sibling; + int hash; if (!match->nh && (!match->fib6_nsiblings || have_oif_match)) goto out; @@ -441,16 +469,25 @@ void fib6_select_path(const struct net *net, struct fib6_result *res, return; } - if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound)) + first = rt6_multipath_first_sibling_rcu(match); + if (!first) goto out; - list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings, - fib6_siblings) { + hash = fl6->mp_hash; + if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound)) { + if (rt6_score_route(first->fib6_nh, first->fib6_flags, oif, + strict) >= 0) + match = first; + goto out; + } + + list_for_each_entry_rcu(sibling, &first->fib6_siblings, + fib6_siblings) { const struct fib6_nh *nh = sibling->fib6_nh; int nh_upper_bound; nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound); - if (fl6->mp_hash > nh_upper_bound) + if (hash > nh_upper_bound) continue; if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0) break; @@ -1432,7 +1469,7 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net, if (res->f6i->fib6_destroying) { struct fib6_info *from; - from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL); + from = 
unrcu_pointer(xchg(&pcpu_rt->from, NULL)); fib6_info_release(from); } @@ -1449,7 +1486,6 @@ static DEFINE_SPINLOCK(rt6_exception_lock); static void rt6_remove_exception(struct rt6_exception_bucket *bucket, struct rt6_exception *rt6_ex) { - struct fib6_info *from; struct net *net; if (!bucket || !rt6_ex) @@ -1461,8 +1497,6 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket, /* purge completely the exception to allow releasing the held resources: * some [sk] cache may keep the dst around for unlimited time */ - from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL); - fib6_info_release(from); dst_dev_put(&rt6_ex->rt6i->dst); hlist_del_rcu(&rt6_ex->hlist); @@ -2774,10 +2808,10 @@ static void ip6_negative_advice(struct sock *sk, if (rt->rt6i_flags & RTF_CACHE) { rcu_read_lock(); if (rt6_check_expired(rt)) { - /* counteract the dst_release() in sk_dst_reset() */ - dst_hold(dst); + /* rt/dst can not be destroyed yet, + * because of rcu_read_lock() + */ sk_dst_reset(sk); - rt6_remove_exception_rt(rt); } rcu_read_unlock(); @@ -3190,13 +3224,18 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst) { struct net_device *dev = dst->dev; unsigned int mtu = dst_mtu(dst); - struct net *net = dev_net(dev); + struct net *net; mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr); + rcu_read_lock(); + + net = dev_net_rcu(dev); if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss) mtu = net->ipv6.sysctl.ip6_rt_min_advmss; + rcu_read_unlock(); + /* * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and * corresponding MSS is IPV6_MAXPLEN - tcp_header_size. @@ -3632,7 +3671,8 @@ out: in6_dev_put(idev); if (err) { - lwtstate_put(fib6_nh->fib_nh_lws); + fib_nh_common_release(&fib6_nh->nh_common); + fib6_nh->nh_common.nhc_pcpu_rth_output = NULL; fib6_nh->fib_nh_lws = NULL; dev_put(dev); } @@ -3792,10 +3832,12 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, if (nh) { if (rt->fib6_src.plen) { NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing"); + err = -EINVAL; goto out_free; } if (!nexthop_get(nh)) { NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); + err = -ENOENT; goto out_free; } rt->nh = nh; @@ -5189,14 +5231,18 @@ static void ip6_route_mpath_notify(struct fib6_info *rt, * nexthop. 
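Several of the route.c hunks above (and the ipv6_sockglue.c one) replace an open-coded "(__force struct fib6_info **)" cast with unrcu_pointer(xchg(...)). Roughly, the idiom is: xchg() atomically clears the __rcu pointer and hands back the previous value, and unrcu_pointer() strips the __rcu annotation so the stolen reference can be released normally. A condensed sketch (steal_rt_from() is an invented name):

static struct fib6_info *steal_rt_from(struct rt6_info *rt)
{
	struct fib6_info *from;

	/* atomically disconnect rt->from and take over its reference */
	from = unrcu_pointer(xchg(&rt->from, NULL));

	return from;	/* callers pass this to fib6_info_release(), which accepts NULL */
}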
Since sibling routes are always added at the end of * the list, find the first sibling of the last route appended */ + rcu_read_lock(); + if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) { - rt = list_first_entry(&rt_last->fib6_siblings, - struct fib6_info, - fib6_siblings); + rt = list_first_or_null_rcu(&rt_last->fib6_siblings, + struct fib6_info, + fib6_siblings); } if (rt) inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); + + rcu_read_unlock(); } static bool ip6_route_mpath_should_notify(const struct fib6_info *rt) @@ -5541,17 +5587,21 @@ static size_t rt6_nlmsg_size(struct fib6_info *f6i) nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size, &nexthop_len); } else { - struct fib6_info *sibling, *next_sibling; struct fib6_nh *nh = f6i->fib6_nh; + struct fib6_info *sibling; nexthop_len = 0; if (f6i->fib6_nsiblings) { rt6_nh_nlmsg_size(nh, &nexthop_len); - list_for_each_entry_safe(sibling, next_sibling, - &f6i->fib6_siblings, fib6_siblings) { + rcu_read_lock(); + + list_for_each_entry_rcu(sibling, &f6i->fib6_siblings, + fib6_siblings) { rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len); } + + rcu_read_unlock(); } nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws); } @@ -5715,7 +5765,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0) goto nla_put_failure; } else if (rt->fib6_nsiblings) { - struct fib6_info *sibling, *next_sibling; + struct fib6_info *sibling; struct nlattr *mp; mp = nla_nest_start_noflag(skb, RTA_MULTIPATH); @@ -5727,14 +5777,21 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, 0) < 0) goto nla_put_failure; - list_for_each_entry_safe(sibling, next_sibling, - &rt->fib6_siblings, fib6_siblings) { + rcu_read_lock(); + + list_for_each_entry_rcu(sibling, &rt->fib6_siblings, + fib6_siblings) { if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common, sibling->fib6_nh->fib_nh_weight, - AF_INET6, 0) < 0) + AF_INET6, 0) < 0) { + rcu_read_unlock(); + goto nla_put_failure; + } } + rcu_read_unlock(); + nla_nest_end(skb, mp); } else if (rt->nh) { if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id)) @@ -6171,7 +6228,7 @@ void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, err = -ENOBUFS; seq = info->nlh ? 
info->nlh->nlmsg_seq : 0; - skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); + skb = nlmsg_new(rt6_nlmsg_size(rt), GFP_ATOMIC); if (!skb) goto errout; @@ -6184,7 +6241,7 @@ void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info, goto errout; } rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, - info->nlh, gfp_any()); + info->nlh, GFP_ATOMIC); return; errout: if (err < 0) diff --git a/net/ipv6/rpl_iptunnel.c b/net/ipv6/rpl_iptunnel.c index c1d0f947a7c8..862ac1e2e191 100644 --- a/net/ipv6/rpl_iptunnel.c +++ b/net/ipv6/rpl_iptunnel.c @@ -125,7 +125,8 @@ static void rpl_destroy_state(struct lwtunnel_state *lwt) } static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt, - const struct ipv6_rpl_sr_hdr *srh) + const struct ipv6_rpl_sr_hdr *srh, + struct dst_entry *cache_dst) { struct ipv6_rpl_sr_hdr *isrh, *csrh; const struct ipv6hdr *oldhdr; @@ -153,7 +154,7 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt, hdrlen = ((csrh->hdrlen + 1) << 3); - err = skb_cow_head(skb, hdrlen + skb->mac_len); + err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb)); if (unlikely(err)) { kfree(buf); return err; @@ -186,7 +187,8 @@ static int rpl_do_srh_inline(struct sk_buff *skb, const struct rpl_lwt *rlwt, return 0; } -static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt) +static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt, + struct dst_entry *cache_dst) { struct dst_entry *dst = skb_dst(skb); struct rpl_iptunnel_encap *tinfo; @@ -196,7 +198,7 @@ static int rpl_do_srh(struct sk_buff *skb, const struct rpl_lwt *rlwt) tinfo = rpl_encap_lwtunnel(dst->lwtstate); - return rpl_do_srh_inline(skb, rlwt, tinfo->srh); + return rpl_do_srh_inline(skb, rlwt, tinfo->srh, cache_dst); } static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb) @@ -208,14 +210,14 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb) rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate); - err = rpl_do_srh(skb, rlwt); - if (unlikely(err)) - goto drop; - local_bh_disable(); dst = dst_cache_get(&rlwt->cache); local_bh_enable(); + err = rpl_do_srh(skb, rlwt, dst); + if (unlikely(err)) + goto drop; + if (unlikely(!dst)) { struct ipv6hdr *hdr = ipv6_hdr(skb); struct flowi6 fl6; @@ -237,15 +239,15 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb) local_bh_disable(); dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr); local_bh_enable(); + + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); + if (unlikely(err)) + goto drop; } skb_dst_drop(skb); skb_dst_set(skb, dst); - err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); - if (unlikely(err)) - goto drop; - return dst_output(net, sk, skb); drop: @@ -257,35 +259,47 @@ static int rpl_input(struct sk_buff *skb) { struct dst_entry *orig_dst = skb_dst(skb); struct dst_entry *dst = NULL; + struct lwtunnel_state *lwtst; struct rpl_lwt *rlwt; int err; - rlwt = rpl_lwt_lwtunnel(orig_dst->lwtstate); + /* We cannot dereference "orig_dst" once ip6_route_input() or + * skb_dst_drop() is called. However, in order to detect a dst loop, we + * need the address of its lwtstate. So, save the address of lwtstate + * now and use it later as a comparison. 
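The rpl_input() rework above saves the input dst's lwtstate before the dst can be dropped, and later refuses to cache the dst returned by ip6_route_input() when it points back to that same lwtstate, since caching it would build a dst -> lwtstate -> dst_cache -> dst reference loop. The check in isolation (cache_input_dst() is an invented name):

static void cache_input_dst(struct dst_cache *cache, struct sk_buff *skb,
			    const struct lwtunnel_state *in_lwtst)
{
	struct dst_entry *dst = skb_dst(skb);

	/* only cache routes that do not reference our own lwtstate */
	if (dst->error || dst->lwtstate == in_lwtst)
		return;

	local_bh_disable();
	dst_cache_set_ip6(cache, dst, &ipv6_hdr(skb)->saddr);
	local_bh_enable();
}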
+ */ + lwtst = orig_dst->lwtstate; - err = rpl_do_srh(skb, rlwt); - if (unlikely(err)) - goto drop; + rlwt = rpl_lwt_lwtunnel(lwtst); local_bh_disable(); dst = dst_cache_get(&rlwt->cache); + local_bh_enable(); + + err = rpl_do_srh(skb, rlwt, dst); + if (unlikely(err)) + goto drop; skb_dst_drop(skb); if (!dst) { ip6_route_input(skb); dst = skb_dst(skb); - if (!dst->error) { + + /* cache only if we don't create a dst reference loop */ + if (!dst->error && lwtst != dst->lwtstate) { + local_bh_disable(); dst_cache_set_ip6(&rlwt->cache, dst, &ipv6_hdr(skb)->saddr); + local_bh_enable(); } + + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); + if (unlikely(err)) + goto drop; } else { skb_dst_set(skb, dst); } - local_bh_enable(); - - err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); - if (unlikely(err)) - goto drop; return dst_input(skb); diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index ae5299c277bc..b186d85ec5b3 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -124,8 +124,8 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb, return flowlabel; } -/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */ -int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) +static int __seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, + int proto, struct dst_entry *cache_dst) { struct dst_entry *dst = skb_dst(skb); struct net *net = dev_net(dst->dev); @@ -137,7 +137,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) hdrlen = (osrh->hdrlen + 1) << 3; tot_len = hdrlen + sizeof(*hdr); - err = skb_cow_head(skb, tot_len + skb->mac_len); + err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb)); if (unlikely(err)) return err; @@ -197,11 +197,18 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) return 0; } + +/* encapsulate an IPv6 packet within an outer IPv6 header with a given SRH */ +int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) +{ + return __seg6_do_srh_encap(skb, osrh, proto, NULL); +} EXPORT_SYMBOL_GPL(seg6_do_srh_encap); /* encapsulate an IPv6 packet within an outer IPv6 header with reduced SRH */ static int seg6_do_srh_encap_red(struct sk_buff *skb, - struct ipv6_sr_hdr *osrh, int proto) + struct ipv6_sr_hdr *osrh, int proto, + struct dst_entry *cache_dst) { __u8 first_seg = osrh->first_segment; struct dst_entry *dst = skb_dst(skb); @@ -230,7 +237,7 @@ static int seg6_do_srh_encap_red(struct sk_buff *skb, tot_len = red_hdrlen + sizeof(struct ipv6hdr); - err = skb_cow_head(skb, tot_len + skb->mac_len); + err = skb_cow_head(skb, tot_len + dst_dev_overhead(cache_dst, skb)); if (unlikely(err)) return err; @@ -317,8 +324,8 @@ out: return 0; } -/* insert an SRH within an IPv6 packet, just after the IPv6 header */ -int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) +static int __seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, + struct dst_entry *cache_dst) { struct ipv6hdr *hdr, *oldhdr; struct ipv6_sr_hdr *isrh; @@ -326,7 +333,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) hdrlen = (osrh->hdrlen + 1) << 3; - err = skb_cow_head(skb, hdrlen + skb->mac_len); + err = skb_cow_head(skb, hdrlen + dst_dev_overhead(cache_dst, skb)); if (unlikely(err)) return err; @@ -369,9 +376,8 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) return 0; } -EXPORT_SYMBOL_GPL(seg6_do_srh_inline); -static int 
seg6_do_srh(struct sk_buff *skb) +static int seg6_do_srh(struct sk_buff *skb, struct dst_entry *cache_dst) { struct dst_entry *dst = skb_dst(skb); struct seg6_iptunnel_encap *tinfo; @@ -384,7 +390,7 @@ static int seg6_do_srh(struct sk_buff *skb) if (skb->protocol != htons(ETH_P_IPV6)) return -EINVAL; - err = seg6_do_srh_inline(skb, tinfo->srh); + err = __seg6_do_srh_inline(skb, tinfo->srh, cache_dst); if (err) return err; break; @@ -402,9 +408,11 @@ static int seg6_do_srh(struct sk_buff *skb) return -EINVAL; if (tinfo->mode == SEG6_IPTUN_MODE_ENCAP) - err = seg6_do_srh_encap(skb, tinfo->srh, proto); + err = __seg6_do_srh_encap(skb, tinfo->srh, + proto, cache_dst); else - err = seg6_do_srh_encap_red(skb, tinfo->srh, proto); + err = seg6_do_srh_encap_red(skb, tinfo->srh, + proto, cache_dst); if (err) return err; @@ -425,11 +433,13 @@ static int seg6_do_srh(struct sk_buff *skb) skb_push(skb, skb->mac_len); if (tinfo->mode == SEG6_IPTUN_MODE_L2ENCAP) - err = seg6_do_srh_encap(skb, tinfo->srh, - IPPROTO_ETHERNET); + err = __seg6_do_srh_encap(skb, tinfo->srh, + IPPROTO_ETHERNET, + cache_dst); else err = seg6_do_srh_encap_red(skb, tinfo->srh, - IPPROTO_ETHERNET); + IPPROTO_ETHERNET, + cache_dst); if (err) return err; @@ -444,6 +454,13 @@ static int seg6_do_srh(struct sk_buff *skb) return 0; } +/* insert an SRH within an IPv6 packet, just after the IPv6 header */ +int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) +{ + return __seg6_do_srh_inline(skb, osrh, NULL); +} +EXPORT_SYMBOL_GPL(seg6_do_srh_inline); + static int seg6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb) { @@ -455,35 +472,47 @@ static int seg6_input_core(struct net *net, struct sock *sk, { struct dst_entry *orig_dst = skb_dst(skb); struct dst_entry *dst = NULL; + struct lwtunnel_state *lwtst; struct seg6_lwt *slwt; int err; - err = seg6_do_srh(skb); - if (unlikely(err)) - goto drop; + /* We cannot dereference "orig_dst" once ip6_route_input() or + * skb_dst_drop() is called. However, in order to detect a dst loop, we + * need the address of its lwtstate. So, save the address of lwtstate + * now and use it later as a comparison. 
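Both the rpl and seg6 encapsulation helpers above now size their skb_cow_head() headroom with dst_dev_overhead(cache_dst, skb) instead of skb->mac_len. The helper is presumably along these lines: prefer the cached output device's link-layer overhead when a dst is available, and fall back to the pre-patch skb->mac_len value otherwise:

static inline unsigned int dst_dev_overhead(struct dst_entry *cache_dst,
					    struct sk_buff *skb)
{
	if (cache_dst)
		return LL_RESERVED_SPACE(cache_dst->dev);

	return skb->mac_len;
}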
+ */ + lwtst = orig_dst->lwtstate; - slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); + slwt = seg6_lwt_lwtunnel(lwtst); local_bh_disable(); dst = dst_cache_get(&slwt->cache); + local_bh_enable(); + + err = seg6_do_srh(skb, dst); + if (unlikely(err)) + goto drop; skb_dst_drop(skb); if (!dst) { ip6_route_input(skb); dst = skb_dst(skb); - if (!dst->error) { + + /* cache only if we don't create a dst reference loop */ + if (!dst->error && lwtst != dst->lwtstate) { + local_bh_disable(); dst_cache_set_ip6(&slwt->cache, dst, &ipv6_hdr(skb)->saddr); + local_bh_enable(); } + + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); + if (unlikely(err)) + goto drop; } else { skb_dst_set(skb, dst); } - local_bh_enable(); - - err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); - if (unlikely(err)) - goto drop; if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled)) return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, @@ -529,16 +558,16 @@ static int seg6_output_core(struct net *net, struct sock *sk, struct seg6_lwt *slwt; int err; - err = seg6_do_srh(skb); - if (unlikely(err)) - goto drop; - slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate); local_bh_disable(); dst = dst_cache_get(&slwt->cache); local_bh_enable(); + err = seg6_do_srh(skb, dst); + if (unlikely(err)) + goto drop; + if (unlikely(!dst)) { struct ipv6hdr *hdr = ipv6_hdr(skb); struct flowi6 fl6; @@ -560,15 +589,15 @@ static int seg6_output_core(struct net *net, struct sock *sk, local_bh_disable(); dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr); local_bh_enable(); + + err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); + if (unlikely(err)) + goto drop; } skb_dst_drop(skb); skb_dst_set(skb, dst); - err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev)); - if (unlikely(err)) - goto drop; - if (static_branch_unlikely(&nf_hooks_lwtunnel_enabled)) return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb, NULL, skb_dst(skb)->dev, dst_output); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 3ffb6a5b1f82..cc24cefdb85c 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -1024,7 +1024,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, ttl = iph6->hop_limit; tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); - if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) { + if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0) { ip_rt_put(rt); goto tx_error; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 06b4acbfd314..0ccaa78f6ff3 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1463,7 +1463,7 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) by tcp. Feel free to propose better solution. 
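In both rpl_output() and seg6_output_core() the cached dst is now looked up before the SRH is built, so header construction can reserve headroom for the real output device, and the extra skb_cow_head() only runs on a cache miss. Reduced to its essentials, the reworked output path has this shape (build_srh() and lookup_and_cache() are invented stand-ins for the per-tunnel helpers):

static int output_with_dst_cache(struct net *net, struct sock *sk,
				 struct sk_buff *skb, struct dst_cache *cache)
{
	struct dst_entry *dst;
	int err;

	local_bh_disable();
	dst = dst_cache_get(cache);		/* may be NULL */
	local_bh_enable();

	err = build_srh(skb, dst);		/* headroom sized from dst when cached */
	if (unlikely(err))
		goto drop;

	if (unlikely(!dst)) {
		dst = lookup_and_cache(net, skb, cache);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto drop;
		}

		err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
		if (unlikely(err))
			goto drop;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return dst_output(net, sk, skb);

drop:
	kfree_skb(skb);
	return err;
}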
--ANK (980728) */ - if (np->rxopt.all) + if (np->rxopt.all && sk->sk_state != TCP_LISTEN) opt_skb = skb_clone_and_charge_r(skb, sk); reason = SKB_DROP_REASON_NOT_SPECIFIED; @@ -1502,8 +1502,6 @@ int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) if (nsk != sk) { if (tcp_child_process(sk, nsk, skb)) goto reset; - if (opt_skb) - __kfree_skb(opt_skb); return 0; } } else diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f55d08d2096a..4b063aa37e38 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1256,9 +1256,9 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, const int hlen = skb_network_header_len(skb) + sizeof(struct udphdr); - if (hlen + cork->gso_size > cork->fragsize) { + if (hlen + min(datalen, cork->gso_size) > cork->fragsize) { kfree_skb(skb); - return -EINVAL; + return -EMSGSIZE; } if (datalen > cork->gso_size * UDP_MAX_SEGMENTS) { kfree_skb(skb); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 815b1df0b2d1..0f660b1d3bd5 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1238,7 +1238,9 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, return -EOPNOTSUPP; /* receive/dequeue next skb: - * the function understands MSG_PEEK and, thus, does not dequeue skb */ + * the function understands MSG_PEEK and, thus, does not dequeue skb + * only refcount is increased. + */ skb = skb_recv_datagram(sk, flags, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) @@ -1254,9 +1256,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, cskb = skb; if (skb_copy_datagram_msg(cskb, offset, msg, copied)) { - if (!(flags & MSG_PEEK)) - skb_queue_head(&sk->sk_receive_queue, skb); - return -EFAULT; + err = -EFAULT; + goto err_out; } /* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */ @@ -1273,11 +1274,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg, err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, sizeof(IUCV_SKB_CB(skb)->class), (void *)&IUCV_SKB_CB(skb)->class); - if (err) { - if (!(flags & MSG_PEEK)) - skb_queue_head(&sk->sk_receive_queue, skb); - return err; - } + if (err) + goto err_out; /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { @@ -1333,8 +1331,18 @@ done: /* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */ if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC)) copied = rlen; + if (flags & MSG_PEEK) + skb_unref(skb); return copied; + +err_out: + if (!(flags & MSG_PEEK)) + skb_queue_head(&sk->sk_receive_queue, skb); + else + skb_unref(skb); + + return err; } static inline __poll_t iucv_accept_poll(struct sock *parent) diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 8e3be0009f60..39c7d62c933f 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -888,15 +888,15 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, if (sk->sk_type != SOCK_STREAM) goto copy_uaddr; + /* Partial read */ + if (used + offset < skb_len) + continue; + if (!(flags & MSG_PEEK)) { skb_unlink(skb, &sk->sk_receive_queue); kfree_skb(skb); *seq = 0; } - - /* Partial read */ - if (used + offset < skb_len) - continue; } while (len > 0); out: @@ -1099,7 +1099,7 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname, lock_sock(sk); if (unlikely(level != SOL_LLC || optlen != sizeof(int))) goto out; - rc = copy_from_sockptr(&opt, optval, sizeof(opt)); + rc = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen); if (rc) goto out; rc = -EINVAL; diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c index 
51bccfb00a9c..61b0159b2fbe 100644 --- a/net/llc/llc_input.c +++ b/net/llc/llc_input.c @@ -124,8 +124,8 @@ static inline int llc_fixup_skb(struct sk_buff *skb) if (unlikely(!pskb_may_pull(skb, llc_len))) return 0; - skb->transport_header += llc_len; skb_pull(skb, llc_len); + skb_reset_transport_header(skb); if (skb->protocol == htons(ETH_P_802_2)) { __be16 pdulen; s32 data_size; diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c index 06fb8e6944b0..7a0cae9a8111 100644 --- a/net/llc/llc_s_ac.c +++ b/net/llc/llc_s_ac.c @@ -24,7 +24,7 @@ #include #include #include - +#include /** * llc_sap_action_unitdata_ind - forward UI PDU to network layer @@ -40,6 +40,26 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb) return 0; } +static int llc_prepare_and_xmit(struct sk_buff *skb) +{ + struct llc_sap_state_ev *ev = llc_sap_ev(skb); + struct sk_buff *nskb; + int rc; + + rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); + if (rc) + return rc; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + + if (skb->sk) + skb_set_owner_w(nskb, skb->sk); + + return dev_queue_xmit(nskb); +} + /** * llc_sap_action_send_ui - sends UI PDU resp to UNITDATA REQ to MAC layer * @sap: SAP @@ -52,17 +72,12 @@ int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb) int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); - int rc; llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_ui_cmd(skb); - rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if (likely(!rc)) { - skb_get(skb); - rc = dev_queue_xmit(skb); - } - return rc; + + return llc_prepare_and_xmit(skb); } /** @@ -77,17 +92,12 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb) int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); - int rc; llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap, ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0); - rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if (likely(!rc)) { - skb_get(skb); - rc = dev_queue_xmit(skb); - } - return rc; + + return llc_prepare_and_xmit(skb); } /** @@ -133,17 +143,12 @@ out: int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb) { struct llc_sap_state_ev *ev = llc_sap_ev(skb); - int rc; llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap, ev->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_test_cmd(skb); - rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac); - if (likely(!rc)) { - skb_get(skb); - rc = dev_queue_xmit(skb); - } - return rc; + + return llc_prepare_and_xmit(skb); } int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index c0dccaceb05e..be48d3f7ffcd 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1683,7 +1683,6 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, struct sta_info *sta, bool new_link, struct link_station_parameters *params) { - int ret = 0; struct ieee80211_supported_band *sband; struct ieee80211_sub_if_data *sdata = sta->sdata; u32 link_id = params->link_id < 0 ? 
0 : params->link_id; @@ -1725,6 +1724,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, } if (params->txpwr_set) { + int ret; + link_sta->pub->txpwr.type = params->txpwr.type; if (params->txpwr.type == NL80211_TX_POWER_LIMITED) link_sta->pub->txpwr.power = params->txpwr.power; @@ -1766,6 +1767,8 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, params->eht_capa_len, link_sta); + ieee80211_sta_init_nss(link_sta); + if (params->opmode_notif_used) { /* returned value is only needed for rc update, but the * rc isn't initialized here yet, so ignore it @@ -1775,9 +1778,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, sband->band); } - ieee80211_sta_init_nss(link_sta); - - return ret; + return 0; } static int sta_apply_parameters(struct ieee80211_local *local, diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 8ced615add71..f8416965c219 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -588,7 +588,7 @@ static ssize_t ieee80211_if_parse_active_links(struct ieee80211_sub_if_data *sda { u16 active_links; - if (kstrtou16(buf, 0, &active_links)) + if (kstrtou16(buf, 0, &active_links) || !active_links) return -EINVAL; return ieee80211_set_active_links(&sdata->vif, active_links) ?: buflen; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 8a3af4144d3f..4cd413bd764f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1984,8 +1984,6 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb, u32 info_flags, u32 ctrl_flags, u64 *cookie); -void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, - struct sk_buff_head *skbs); struct sk_buff * ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, u32 info_flags); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8861f63e2cfb..fc60e80088a3 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -688,6 +688,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) ieee80211_txq_remove_vlan(local, sdata); + if (sdata->vif.txq) + ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq)); + sdata->bss = NULL; if (local->open_count == 0) diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 1eec4e2eb74c..683301d9f508 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -144,6 +144,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local) } power = ieee80211_chandef_max_power(&chandef); + if (local->user_power_level != IEEE80211_UNSET_POWER_LEVEL) + power = min(local->user_power_level, power); rcu_read_lock(); list_for_each_entry_rcu(sdata, &local->interfaces, list) { diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 9b1ce7c3925a..47eb67dc11cf 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -365,6 +365,12 @@ u32 airtime_link_metric_get(struct ieee80211_local *local, return (u32)result; } +/* Check that the first metric is at least 10% better than the second one */ +static bool is_metric_better(u32 x, u32 y) +{ + return (x < y) && (x < (y - x / 10)); +} + /** * hwmp_route_info_get - Update routing info to originator and transmitter * @@ -455,8 +461,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, (mpath->sn == orig_sn && (rcu_access_pointer(mpath->next_hop) != sta ? 
- mult_frac(new_metric, 10, 9) : - new_metric) >= mpath->metric)) { + !is_metric_better(new_metric, mpath->metric) : + new_metric >= mpath->metric))) { process = false; fresh_info = false; } @@ -526,8 +532,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata, if ((mpath->flags & MESH_PATH_FIXED) || ((mpath->flags & MESH_PATH_ACTIVE) && ((rcu_access_pointer(mpath->next_hop) != sta ? - mult_frac(last_hop_metric, 10, 9) : - last_hop_metric) > mpath->metric))) + !is_metric_better(last_hop_metric, mpath->metric) : + last_hop_metric > mpath->metric)))) fresh_info = false; } else { mpath = mesh_path_add(sdata, ta); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9a5530ca2f6b..b300972c3150 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -2896,7 +2896,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, if (tx) ieee80211_flush_queues(local, sdata, false); - drv_mgd_complete_tx(sdata->local, sdata, &info); + if (tx || frame_buf) + drv_mgd_complete_tx(sdata->local, sdata, &info); /* clear AP addr only after building the needed mgmt frames */ eth_zero_addr(sdata->deflink.u.mgd.bssid); @@ -7311,7 +7312,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true, req->reason_code, false); - drv_mgd_complete_tx(sdata->local, sdata, &info); return 0; } diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 3f9ddd7f04b6..3a96aa306616 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -1294,3 +1294,4 @@ void ieee80211_purge_tx_queue(struct ieee80211_hw *hw, while ((skb = __skb_dequeue(skbs))) ieee80211_free_txskb(hw, skb); } +EXPORT_SYMBOL(ieee80211_purge_tx_queue); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 738f1f139a90..e8326e09d1b3 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2436,6 +2436,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n"); else WARN(1, "Hardware became unavailable during restart.\n"); + ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP, + IEEE80211_QUEUE_STOP_REASON_SUSPEND, + false); ieee80211_handle_reconfig_failure(local); return res; } diff --git a/net/mac802154/iface.c b/net/mac802154/iface.c index 7e2065e72915..0233929502ec 100644 --- a/net/mac802154/iface.c +++ b/net/mac802154/iface.c @@ -689,6 +689,10 @@ void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata) ASSERT_RTNL(); mutex_lock(&sdata->local->iflist_mtx); + if (list_empty(&sdata->local->interfaces)) { + mutex_unlock(&sdata->local->iflist_mtx); + return; + } list_del_rcu(&sdata->list); mutex_unlock(&sdata->local->iflist_mtx); diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index 2316e7772b78..6a963eac1cc2 100644 --- a/net/mctp/af_mctp.c +++ b/net/mctp/af_mctp.c @@ -551,6 +551,9 @@ static int mctp_sk_hash(struct sock *sk) { struct net *net = sock_net(sk); + /* Bind lookup runs under RCU, remain live during that. 
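Setting SOCK_RCU_FREE before the socket is inserted into the RCU-visible bind list means the sock is freed only after an RCU grace period, so a reader that found it under rcu_read_lock() cannot have it freed underneath. The reader side this protects looks roughly like the following (mctp_sk_match() is an invented predicate; the real lookup lives in the MCTP receive path):

static bool mctp_deliver_bound(struct net *net, struct sk_buff *skb)
{
	bool delivered = false;
	struct sock *sk;

	rcu_read_lock();
	sk_for_each_rcu(sk, &net->mctp.binds) {
		if (!mctp_sk_match(sk, skb))
			continue;
		/* SOCK_RCU_FREE guarantees the sock outlives this
		 * read-side critical section
		 */
		delivered = sock_queue_rcv_skb(sk, skb) == 0;
		break;
	}
	rcu_read_unlock();

	return delivered;
}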
*/ + sock_set_flag(sk, SOCK_RCU_FREE); + mutex_lock(&net->mctp.bind_lock); sk_add_node_rcu(sk, &net->mctp.binds); mutex_unlock(&net->mctp.bind_lock); diff --git a/net/mctp/route.c b/net/mctp/route.c index ea7cb9973128..62952ad5cb63 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -274,8 +274,10 @@ static void mctp_flow_prepare_output(struct sk_buff *skb, struct mctp_dev *dev) key = flow->key; - if (WARN_ON(key->dev && key->dev != dev)) + if (key->dev) { + WARN_ON(key->dev != dev); return; + } mctp_dev_set_key(dev, key); } @@ -334,8 +336,13 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) msk = NULL; rc = -EINVAL; - /* we may be receiving a locally-routed packet; drop source sk - * accounting + /* We may be receiving a locally-routed packet; drop source sk + * accounting. + * + * From here, we will either queue the skb - either to a frag_queue, or + * to a receiving socket. When that succeeds, we clear the skb pointer; + * a non-NULL skb on exit will be otherwise unowned, and hence + * kfree_skb()-ed. */ skb_orphan(skb); @@ -389,7 +396,9 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) * pending key. */ if (flags & MCTP_HDR_FLAG_EOM) { - sock_queue_rcv_skb(&msk->sk, skb); + rc = sock_queue_rcv_skb(&msk->sk, skb); + if (!rc) + skb = NULL; if (key) { /* we've hit a pending reassembly; not much we * can do but drop it @@ -398,7 +407,6 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) MCTP_TRACE_KEY_REPLIED); key = NULL; } - rc = 0; goto out_unlock; } @@ -425,8 +433,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) * this function. */ rc = mctp_key_add(key, msk); - if (!rc) + if (!rc) { trace_mctp_key_acquire(key); + skb = NULL; + } /* we don't need to release key->lock on exit, so * clean up here and suppress the unlock via @@ -444,6 +454,8 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) key = NULL; } else { rc = mctp_frag_queue(key, skb); + if (!rc) + skb = NULL; } } @@ -458,12 +470,19 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) else rc = mctp_frag_queue(key, skb); + if (rc) + goto out_unlock; + + /* we've queued; the queue owns the skb now */ + skb = NULL; + /* end of message? 
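The mctp_route_input() changes above converge every exit on the single kfree_skb() at the out label: whenever the skb has been handed to a frag queue or a receiving socket, the local pointer is cleared, so only a still-unowned skb is ever freed. The convention in miniature (consume_or_free() is an invented name):

static int consume_or_free(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	rc = sock_queue_rcv_skb(sk, skb);
	if (!rc)
		skb = NULL;	/* the receive queue owns it now */

	kfree_skb(skb);		/* kfree_skb(NULL) is a no-op */
	return rc;
}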
deliver to socket, and we're done with * the reassembly/response key */ - if (!rc && flags & MCTP_HDR_FLAG_EOM) { - sock_queue_rcv_skb(key->sk, key->reasm_head); - key->reasm_head = NULL; + if (flags & MCTP_HDR_FLAG_EOM) { + rc = sock_queue_rcv_skb(key->sk, key->reasm_head); + if (!rc) + key->reasm_head = NULL; __mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED); key = NULL; } @@ -482,8 +501,7 @@ out_unlock: if (any_key) mctp_key_unref(any_key); out: - if (rc) - kfree_skb(skb); + kfree_skb(skb); return rc; } diff --git a/net/mptcp/options.c b/net/mptcp/options.c index 517bbfe5f626..929074f08713 100644 --- a/net/mptcp/options.c +++ b/net/mptcp/options.c @@ -103,7 +103,6 @@ static void mptcp_parse_option(const struct sk_buff *skb, mp_opt->suboptions |= OPTION_MPTCP_DSS; mp_opt->use_map = 1; mp_opt->mpc_map = 1; - mp_opt->use_ack = 0; mp_opt->data_len = get_unaligned_be16(ptr); ptr += 2; } @@ -152,11 +151,6 @@ static void mptcp_parse_option(const struct sk_buff *skb, pr_debug("DSS\n"); ptr++; - /* we must clear 'mpc_map' be able to detect MP_CAPABLE - * map vs DSS map in mptcp_incoming_options(), and reconstruct - * map info accordingly - */ - mp_opt->mpc_map = 0; flags = (*ptr++) & MPTCP_DSS_FLAG_MASK; mp_opt->data_fin = (flags & MPTCP_DSS_DATA_FIN) != 0; mp_opt->dsn64 = (flags & MPTCP_DSS_DSN64) != 0; @@ -364,8 +358,11 @@ void mptcp_get_options(const struct sk_buff *skb, const unsigned char *ptr; int length; - /* initialize option status */ - mp_opt->suboptions = 0; + /* Ensure that casting the whole status to u32 is efficient and safe */ + BUILD_BUG_ON(sizeof_field(struct mptcp_options_received, status) != sizeof(u32)); + BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct mptcp_options_received, status), + sizeof(u32))); + *(u32 *)&mp_opt->status = 0; length = (th->doff * 4) - sizeof(struct tcphdr); ptr = (const unsigned char *)(th + 1); @@ -605,7 +602,6 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb, } opts->ext_copy.use_ack = 1; opts->suboptions = OPTION_MPTCP_DSS; - WRITE_ONCE(msk->old_wspace, __mptcp_space((struct sock *)msk)); /* Add kind/length/subtype/flag overhead if mapping is not populated */ if (dss_size == 0) @@ -653,6 +649,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * struct mptcp_sock *msk = mptcp_sk(subflow->conn); bool drop_other_suboptions = false; unsigned int opt_size = *size; + struct mptcp_addr_info addr; bool echo; int len; @@ -661,13 +658,20 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * */ if (!mptcp_pm_should_add_signal(msk) || (opts->suboptions & (OPTION_MPTCP_MPJ_ACK | OPTION_MPTCP_MPC_ACK)) || - !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &opts->addr, + !mptcp_pm_add_addr_signal(msk, skb, opt_size, remaining, &addr, &echo, &drop_other_suboptions)) return false; + /* + * Later on, mptcp_write_options() will enforce mutually exclusion with + * DSS, bail out if such option is set and we can't drop it. 
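The options parsing hunks above stop clearing individual flags (use_ack, mpc_map) per suboption and instead zero the whole parsed state in one go: the suboptions word and the flag bits are grouped so that a single aligned 32-bit store resets them, with BUILD_BUG_ON() guarding the size and alignment assumptions (the struct_group() itself appears in the protocol.h hunk further down). The same pattern in a standalone form, with shortened field names:

struct parsed_opts {
	struct_group(status,
		u16 suboptions;
		u16 use_map:1,
		    mpc_map:1,
		    use_ack:1;
	);
	u32 token;
};

static void parsed_opts_reset(struct parsed_opts *p)
{
	/* one aligned 32-bit store clears suboptions and every flag bit */
	BUILD_BUG_ON(sizeof_field(struct parsed_opts, status) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct parsed_opts, status), sizeof(u32)));
	*(u32 *)&p->status = 0;
}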
+ */ if (drop_other_suboptions) remaining += opt_size; - len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port); + else if (opts->suboptions & OPTION_MPTCP_DSS) + return false; + + len = mptcp_add_addr_len(addr.family, echo, !!addr.port); if (remaining < len) return false; @@ -684,6 +688,7 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff * opts->ahmac = 0; *size -= opt_size; } + opts->addr = addr; opts->suboptions |= OPTION_MPTCP_ADD_ADDR; if (!echo) { opts->ahmac = add_addr_generate_hmac(msk->local_key, @@ -1258,7 +1263,7 @@ static void mptcp_set_rwin(struct tcp_sock *tp, struct tcphdr *th) } MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDCONFLICT); } - return; + goto update_wspace; } if (rcv_wnd_new != rcv_wnd_old) { @@ -1283,6 +1288,9 @@ raise_win: th->window = htons(new_win); MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_RCVWNDSHARED); } + +update_wspace: + WRITE_ONCE(msk->old_wspace, tp->rcv_wnd); } __sum16 __mptcp_make_csum(u64 data_seq, u32 subflow_seq, u16 data_len, __wsum sum) diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 49e8156f5388..3391d4df2dbb 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -525,15 +525,13 @@ __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id) } static struct mptcp_pm_addr_entry * -__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info, - bool lookup_by_id) +__lookup_addr(struct pm_nl_pernet *pernet, const struct mptcp_addr_info *info) { struct mptcp_pm_addr_entry *entry; - list_for_each_entry(entry, &pernet->local_addr_list, list) { - if ((!lookup_by_id && - mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) || - (lookup_by_id && entry->addr.id == info->id)) + list_for_each_entry_rcu(entry, &pernet->local_addr_list, list, + lockdep_is_held(&pernet->lock)) { + if (mptcp_addresses_equal(&entry->addr, info, entry->addr.port)) return entry; } return NULL; @@ -564,7 +562,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) mptcp_local_address((struct sock_common *)msk->first, &mpc_addr); rcu_read_lock(); - entry = __lookup_addr(pernet, &mpc_addr, false); + entry = __lookup_addr(pernet, &mpc_addr); if (entry) { __clear_bit(entry->addr.id, msk->pm.id_avail_bitmap); msk->mpc_endpoint_id = entry->addr.id; @@ -971,7 +969,7 @@ static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry) static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, struct mptcp_pm_addr_entry *entry, - bool needs_id) + bool needs_id, bool replace) { struct mptcp_pm_addr_entry *cur, *del_entry = NULL; unsigned int addr_max; @@ -1005,6 +1003,17 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet, if (entry->addr.id) goto out; + /* allow callers that only need to look up the local + * addr's id to skip replacement. This allows them to + * avoid calling synchronize_rcu in the packet recv + * path. 
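__lookup_addr() above now walks the endpoint list with the four-argument list_for_each_entry_rcu(), passing lockdep_is_held(&pernet->lock) as the extra condition, so the same helper is valid both under rcu_read_lock() and with the pernet spinlock held, and lockdep can verify whichever applies. The generic form of that dual-context pattern (my_table, my_entry and my_lookup() are invented names):

struct my_entry {
	struct list_head node;
	u32 key;
};

struct my_table {
	struct list_head entries;	/* RCU-protected list */
	spinlock_t lock;		/* taken by writers */
};

/* callable under rcu_read_lock() OR with t->lock held */
static struct my_entry *my_lookup(struct my_table *t, u32 key)
{
	struct my_entry *e;

	list_for_each_entry_rcu(e, &t->entries, node,
				lockdep_is_held(&t->lock))
		if (e->key == key)
			return e;

	return NULL;
}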
+ */ + if (!replace) { + kfree(entry); + ret = cur->addr.id; + goto out; + } + pernet->addrs--; entry->addr.id = cur->addr.id; list_del_rcu(&cur->list); @@ -1151,7 +1160,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct mptcp_addr_info *skc entry->ifindex = 0; entry->flags = MPTCP_PM_ADDR_FLAG_IMPLICIT; entry->lsk = NULL; - ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true); + ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, true, false); if (ret < 0) kfree(entry); @@ -1446,7 +1455,8 @@ static int mptcp_nl_cmd_add_addr(struct sk_buff *skb, struct genl_info *info) } } ret = mptcp_pm_nl_append_new_local_addr(pernet, entry, - !mptcp_pm_has_addr_attr_id(attr, info)); + !mptcp_pm_has_addr_attr_id(attr, info), + true); if (ret < 0) { GENL_SET_ERR_MSG(info, "too many addresses or duplicate one"); goto out_free; @@ -1556,11 +1566,6 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net, if (mptcp_pm_is_userspace(msk)) goto next; - if (list_empty(&msk->conn_list)) { - mptcp_pm_remove_anno_addr(msk, addr, false); - goto next; - } - lock_sock(sk); remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr); mptcp_pm_remove_anno_addr(msk, addr, remove_subflow && @@ -2081,13 +2086,15 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info) token, &addr, &remote, bkup); spin_lock_bh(&pernet->lock); - entry = __lookup_addr(pernet, &addr.addr, lookup_by_id); + entry = lookup_by_id ? __lookup_addr_by_id(pernet, addr.addr.id) : + __lookup_addr(pernet, &addr.addr); if (!entry) { spin_unlock_bh(&pernet->lock); return -EINVAL; } if ((addr.flags & MPTCP_PM_ADDR_FLAG_FULLMESH) && - (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)) { + (entry->flags & (MPTCP_PM_ADDR_FLAG_SIGNAL | + MPTCP_PM_ADDR_FLAG_IMPLICIT))) { spin_unlock_bh(&pernet->lock); return -EINVAL; } diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c index 748e3876ec6d..9016f8900c19 100644 --- a/net/mptcp/pm_userspace.c +++ b/net/mptcp/pm_userspace.c @@ -106,22 +106,29 @@ static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk, return -EINVAL; } +static struct mptcp_pm_addr_entry * +mptcp_userspace_pm_lookup_addr_by_id(struct mptcp_sock *msk, unsigned int id) +{ + struct mptcp_pm_addr_entry *entry; + + list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) { + if (entry->addr.id == id) + return entry; + } + return NULL; +} + int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, unsigned int id, u8 *flags, int *ifindex) { - struct mptcp_pm_addr_entry *entry, *match = NULL; + struct mptcp_pm_addr_entry *match; *flags = 0; *ifindex = 0; spin_lock_bh(&msk->pm.lock); - list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) { - if (id == entry->addr.id) { - match = entry; - break; - } - } + match = mptcp_userspace_pm_lookup_addr_by_id(msk, id); spin_unlock_bh(&msk->pm.lock); if (match) { *flags = match->flags; @@ -185,6 +192,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info) struct mptcp_pm_addr_entry addr_val; struct mptcp_sock *msk; int err = -EINVAL; + struct sock *sk; u32 token_val; if (!addr || !token) { @@ -200,6 +208,8 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info) return err; } + sk = (struct sock *)msk; + if (!mptcp_pm_is_userspace(msk)) { GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected"); goto announce_err; @@ -223,7 +233,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info) goto announce_err; } - 
lock_sock((struct sock *)msk); + lock_sock(sk); spin_lock_bh(&msk->pm.lock); if (mptcp_pm_alloc_anno_list(msk, &addr_val.addr)) { @@ -233,11 +243,11 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info) } spin_unlock_bh(&msk->pm.lock); - release_sock((struct sock *)msk); + release_sock(sk); err = 0; announce_err: - sock_put((struct sock *)msk); + sock_put(sk); return err; } @@ -279,11 +289,12 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info) { struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN]; struct nlattr *id = info->attrs[MPTCP_PM_ATTR_LOC_ID]; - struct mptcp_pm_addr_entry *match = NULL; + struct mptcp_pm_addr_entry *match; struct mptcp_pm_addr_entry *entry; struct mptcp_sock *msk; LIST_HEAD(free_list); int err = -EINVAL; + struct sock *sk; u32 token_val; u8 id_val; @@ -301,6 +312,8 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info) return err; } + sk = (struct sock *)msk; + if (!mptcp_pm_is_userspace(msk)) { GENL_SET_ERR_MSG(info, "invalid request; userspace PM not selected"); goto remove_err; @@ -311,34 +324,31 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info) goto remove_err; } - lock_sock((struct sock *)msk); - - list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) { - if (entry->addr.id == id_val) { - match = entry; - break; - } - } + lock_sock(sk); + spin_lock_bh(&msk->pm.lock); + match = mptcp_userspace_pm_lookup_addr_by_id(msk, id_val); if (!match) { GENL_SET_ERR_MSG(info, "address with specified id not found"); - release_sock((struct sock *)msk); + spin_unlock_bh(&msk->pm.lock); + release_sock(sk); goto remove_err; } list_move(&match->list, &free_list); + spin_unlock_bh(&msk->pm.lock); mptcp_pm_remove_addrs(msk, &free_list); - release_sock((struct sock *)msk); + release_sock(sk); list_for_each_entry_safe(match, entry, &free_list, list) { - sock_kfree_s((struct sock *)msk, match, sizeof(*match)); + sock_kfree_s(sk, match, sizeof(*match)); } err = 0; remove_err: - sock_put((struct sock *)msk); + sock_put(sk); return err; } @@ -558,8 +568,10 @@ int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token, struct mptcp_pm_addr_entry *loc, struct mptcp_pm_addr_entry *rem, u8 bkup) { + struct mptcp_pm_addr_entry *entry; struct mptcp_sock *msk; int ret = -EINVAL; + struct sock *sk; u32 token_val; token_val = nla_get_u32(token); @@ -568,6 +580,8 @@ int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token, if (!msk) return ret; + sk = (struct sock *)msk; + if (!mptcp_pm_is_userspace(msk)) goto set_flags_err; @@ -575,11 +589,22 @@ int mptcp_userspace_pm_set_flags(struct net *net, struct nlattr *token, rem->addr.family == AF_UNSPEC) goto set_flags_err; - lock_sock((struct sock *)msk); + spin_lock_bh(&msk->pm.lock); + list_for_each_entry(entry, &msk->pm.userspace_pm_local_addr_list, list) { + if (mptcp_addresses_equal(&entry->addr, &loc->addr, false)) { + if (bkup) + entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP; + else + entry->flags &= ~MPTCP_PM_ADDR_FLAG_BACKUP; + } + } + spin_unlock_bh(&msk->pm.lock); + + lock_sock(sk); ret = mptcp_pm_nl_mp_prio_send_ack(msk, &loc->addr, &rem->addr, bkup); - release_sock((struct sock *)msk); + release_sock(sk); set_flags_err: - sock_put((struct sock *)msk); + sock_put(sk); return ret; } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index d68e93dab88c..e975693b8fa9 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -149,6 +149,7 @@ static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, int 
delta; if (MPTCP_SKB_CB(from)->offset || + ((to->len + from->len) > (sk->sk_rcvbuf >> 3)) || !skb_try_coalesce(to, from, &fragstolen, &delta)) return false; @@ -538,13 +539,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk) mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow)); } -static void mptcp_subflow_cleanup_rbuf(struct sock *ssk) +static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied) { bool slow; slow = lock_sock_fast(ssk); if (tcp_can_send_ack(ssk)) - tcp_cleanup_rbuf(ssk, 1); + tcp_cleanup_rbuf(ssk, copied); unlock_sock_fast(ssk, slow); } @@ -561,7 +562,7 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty) (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED))); } -static void mptcp_cleanup_rbuf(struct mptcp_sock *msk) +static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied) { int old_space = READ_ONCE(msk->old_wspace); struct mptcp_subflow_context *subflow; @@ -569,14 +570,14 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk) int space = __mptcp_space(sk); bool cleanup, rx_empty; - cleanup = (space > 0) && (space >= (old_space << 1)); - rx_empty = !__mptcp_rmem(sk); + cleanup = (space > 0) && (space >= (old_space << 1)) && copied; + rx_empty = !__mptcp_rmem(sk) && copied; mptcp_for_each_subflow(msk, subflow) { struct sock *ssk = mptcp_subflow_tcp_sock(subflow); if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty)) - mptcp_subflow_cleanup_rbuf(ssk); + mptcp_subflow_cleanup_rbuf(ssk, copied); } } @@ -1779,8 +1780,10 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msgh * see mptcp_disconnect(). * Attempt it again outside the problematic scope. */ - if (!mptcp_disconnect(sk, 0)) + if (!mptcp_disconnect(sk, 0)) { + sk->sk_disconnects++; sk->sk_socket->state = SS_UNCONNECTED; + } } return ret; @@ -1917,6 +1920,8 @@ do_error: goto out; } +static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied); + static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, struct msghdr *msg, size_t len, int flags, @@ -1968,6 +1973,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk, break; } + mptcp_rcv_space_adjust(msk, copied); return copied; } @@ -2057,7 +2063,8 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) slow = lock_sock_fast(ssk); WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); tcp_sk(ssk)->window_clamp = window_clamp; - tcp_cleanup_rbuf(ssk, 1); + if (tcp_can_send_ack(ssk)) + tcp_cleanup_rbuf(ssk, 1); unlock_sock_fast(ssk, slow); } } @@ -2180,7 +2187,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, cmsg_flags = MPTCP_CMSG_INQ; while (copied < len) { - int bytes_read; + int err, bytes_read; bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags); if (unlikely(bytes_read < 0)) { @@ -2191,9 +2198,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, copied += bytes_read; - /* be sure to advertise window change */ - mptcp_cleanup_rbuf(msk); - if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk)) continue; @@ -2245,9 +2249,16 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, } pr_debug("block timeout %ld\n", timeo); - sk_wait_data(sk, &timeo, NULL); + mptcp_cleanup_rbuf(msk, copied); + err = sk_wait_data(sk, &timeo, NULL); + if (err < 0) { + err = copied ? 
: err; + goto out_err; + } } + mptcp_cleanup_rbuf(msk, copied); + out_err: if (cmsg_flags && copied >= 0) { if (cmsg_flags & MPTCP_CMSG_TS) @@ -2263,8 +2274,6 @@ out_err: pr_debug("msk=%p rx queue empty=%d:%d copied=%d\n", msk, skb_queue_empty_lockless(&sk->sk_receive_queue), skb_queue_empty(&msk->receive_queue), copied); - if (!(flags & MSG_PEEK)) - mptcp_rcv_space_adjust(msk, copied); release_sock(sk); return copied; @@ -2702,8 +2711,8 @@ void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout) if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp) return; - close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies + - TCP_TIMEWAIT_LEN; + close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp - + tcp_jiffies32 + jiffies + TCP_TIMEWAIT_LEN; /* the close timeout takes precedence on the fail one, and here at least one of * them is active diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index d39ad72ac8e8..25c1cda5c1bc 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -141,22 +141,24 @@ struct mptcp_options_received { u32 subflow_seq; u16 data_len; __sum16 csum; - u16 suboptions; + struct_group(status, + u16 suboptions; + u16 use_map:1, + dsn64:1, + data_fin:1, + use_ack:1, + ack64:1, + mpc_map:1, + reset_reason:4, + reset_transient:1, + echo:1, + backup:1, + deny_join_id0:1, + __unused:2; + ); + u8 join_id; u32 token; u32 nonce; - u16 use_map:1, - dsn64:1, - data_fin:1, - use_ack:1, - ack64:1, - mpc_map:1, - reset_reason:4, - reset_transient:1, - echo:1, - backup:1, - deny_join_id0:1, - __unused:2; - u8 join_id; u64 thmac; u8 hmac[MPTCPOPT_HMAC_LEN]; struct mptcp_addr_info addr; @@ -979,6 +981,8 @@ static inline void __mptcp_do_fallback(struct mptcp_sock *msk) pr_debug("TCP fallback already done (msk=%p)\n", msk); return; } + if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback))) + return; set_bit(MPTCP_FALLBACK_DONE, &msk->flags); } diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index 766797ace942..129c9e9ee396 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -1266,6 +1266,32 @@ static int mptcp_getsockopt_v4(struct mptcp_sock *msk, int optname, switch (optname) { case IP_TOS: return mptcp_put_int_option(msk, optval, optlen, inet_sk(sk)->tos); + case IP_FREEBIND: + return mptcp_put_int_option(msk, optval, optlen, + inet_sk(sk)->freebind); + case IP_TRANSPARENT: + return mptcp_put_int_option(msk, optval, optlen, + inet_sk(sk)->transparent); + } + + return -EOPNOTSUPP; +} + +static int mptcp_getsockopt_v6(struct mptcp_sock *msk, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = (void *)msk; + + switch (optname) { + case IPV6_V6ONLY: + return mptcp_put_int_option(msk, optval, optlen, + sk->sk_ipv6only); + case IPV6_TRANSPARENT: + return mptcp_put_int_option(msk, optval, optlen, + inet_sk(sk)->transparent); + case IPV6_FREEBIND: + return mptcp_put_int_option(msk, optval, optlen, + inet_sk(sk)->freebind); } return -EOPNOTSUPP; @@ -1308,6 +1334,8 @@ int mptcp_getsockopt(struct sock *sk, int level, int optname, if (level == SOL_IP) return mptcp_getsockopt_v4(msk, optname, optval, option); + if (level == SOL_IPV6) + return mptcp_getsockopt_v6(msk, optname, optval, option); if (level == SOL_TCP) return mptcp_getsockopt_sol_tcp(msk, optname, optval, option); if (level == SOL_MPTCP) diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 884abe3cde53..a6237eb55537 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -643,8 +643,6 @@ static bool 
subflow_hmac_valid(const struct request_sock *req, subflow_req = mptcp_subflow_rsk(req); msk = subflow_req->msk; - if (!msk) - return false; subflow_generate_hmac(msk->remote_key, msk->local_key, subflow_req->remote_nonce, @@ -739,12 +737,8 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, } else if (subflow_req->mp_join) { mptcp_get_options(skb, &mp_opt); - if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) || - !subflow_hmac_valid(req, &mp_opt) || - !mptcp_can_accept_new_subflow(subflow_req->msk)) { - SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); + if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK)) fallback = true; - } } create_child: @@ -794,6 +788,17 @@ create_child: goto dispose_child; } + if (!subflow_hmac_valid(req, &mp_opt)) { + SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC); + subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); + goto dispose_child; + } + + if (!mptcp_can_accept_new_subflow(owner)) { + subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); + goto dispose_child; + } + /* move the msk reference ownership to the subflow */ subflow_req->msk = NULL; ctx->conn = (struct sock *)owner; @@ -1020,7 +1025,6 @@ static enum mapping_status get_mapping_status(struct sock *ssk, if (data_len == 0) { pr_debug("infinite mapping received\n"); MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX); - subflow->map_data_len = 0; return MAPPING_INVALID; } @@ -1162,18 +1166,6 @@ static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ss mptcp_schedule_work(sk); } -static bool subflow_can_fallback(struct mptcp_subflow_context *subflow) -{ - struct mptcp_sock *msk = mptcp_sk(subflow->conn); - - if (subflow->mp_join) - return false; - else if (READ_ONCE(msk->csum_enabled)) - return !subflow->valid_csum_seen; - else - return READ_ONCE(msk->allow_infinite_fallback); -} - static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk) { struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); @@ -1277,7 +1269,7 @@ fallback: return true; } - if (!subflow_can_fallback(subflow) && subflow->map_data_len) { + if (!READ_ONCE(msk->allow_infinite_fallback)) { /* fatal protocol error, close the socket. 
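With subflow_can_fallback() removed, the data path above gates fallback purely on msk->allow_infinite_fallback, read locklessly, and __mptcp_do_fallback() (see the protocol.h hunk earlier) now warns and bails out if that invariant is violated. Condensed, the guarded fallback amounts to the following (do_fallback_checked() is an invented name for the shape of __mptcp_do_fallback()):

static void do_fallback_checked(struct mptcp_sock *msk)
{
	/* fallback is only legal while infinite fallback is still allowed;
	 * a racy caller trips the warning instead of corrupting the stream
	 */
	if (WARN_ON_ONCE(!READ_ONCE(msk->allow_infinite_fallback)))
		return;

	set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}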
* subflow_error_report() will introduce the appropriate barriers */ diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h index ef0f8f73826f..4e0842df5234 100644 --- a/net/ncsi/internal.h +++ b/net/ncsi/internal.h @@ -289,6 +289,7 @@ enum { ncsi_dev_state_config_sp = 0x0301, ncsi_dev_state_config_cis, ncsi_dev_state_config_oem_gma, + ncsi_dev_state_config_apply_mac, ncsi_dev_state_config_clear_vids, ncsi_dev_state_config_svf, ncsi_dev_state_config_ev, @@ -322,6 +323,7 @@ struct ncsi_dev_priv { #define NCSI_DEV_RESHUFFLE 4 #define NCSI_DEV_RESET 8 /* Reset state of NC */ unsigned int gma_flag; /* OEM GMA flag */ + struct sockaddr pending_mac; /* MAC address received from GMA */ spinlock_t lock; /* Protect the NCSI device */ unsigned int package_probe_id;/* Current ID during probe */ unsigned int package_num; /* Number of packages */ diff --git a/net/ncsi/ncsi-cmd.c b/net/ncsi/ncsi-cmd.c index dda8b76b7798..7be177f55173 100644 --- a/net/ncsi/ncsi-cmd.c +++ b/net/ncsi/ncsi-cmd.c @@ -269,7 +269,8 @@ static struct ncsi_cmd_handler { { NCSI_PKT_CMD_GPS, 0, ncsi_cmd_handler_default }, { NCSI_PKT_CMD_OEM, -1, ncsi_cmd_handler_oem }, { NCSI_PKT_CMD_PLDM, 0, NULL }, - { NCSI_PKT_CMD_GPUUID, 0, ncsi_cmd_handler_default } + { NCSI_PKT_CMD_GPUUID, 0, ncsi_cmd_handler_default }, + { NCSI_PKT_CMD_GMCMA, 0, ncsi_cmd_handler_default } }; static struct ncsi_request *ncsi_alloc_command(struct ncsi_cmd_arg *nca) diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index 760b33fa03a8..6a58ae8b5d03 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c @@ -1038,17 +1038,34 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp) : ncsi_dev_state_config_clear_vids; break; case ncsi_dev_state_config_oem_gma: - nd->state = ncsi_dev_state_config_clear_vids; + nd->state = ncsi_dev_state_config_apply_mac; - nca.type = NCSI_PKT_CMD_OEM; nca.package = np->id; nca.channel = nc->id; ndp->pending_req_num = 1; - ret = ncsi_gma_handler(&nca, nc->version.mf_id); - if (ret < 0) + if (nc->version.major >= 1 && nc->version.minor >= 2) { + nca.type = NCSI_PKT_CMD_GMCMA; + ret = ncsi_xmit_cmd(&nca); + } else { + nca.type = NCSI_PKT_CMD_OEM; + ret = ncsi_gma_handler(&nca, nc->version.mf_id); + } + if (ret < 0) { + nd->state = ncsi_dev_state_config_clear_vids; schedule_work(&ndp->work); + } break; + case ncsi_dev_state_config_apply_mac: + rtnl_lock(); + ret = dev_set_mac_address(dev, &ndp->pending_mac, NULL); + rtnl_unlock(); + if (ret < 0) + netdev_warn(dev, "NCSI: 'Writing MAC address to device failed\n"); + + nd->state = ncsi_dev_state_config_clear_vids; + + fallthrough; case ncsi_dev_state_config_clear_vids: case ncsi_dev_state_config_svf: case ncsi_dev_state_config_ev: @@ -1368,6 +1385,12 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp) nd->state = ncsi_dev_state_probe_package; break; case ncsi_dev_state_probe_package: + if (ndp->package_probe_id >= 8) { + /* Last package probed, finishing */ + ndp->flags |= NCSI_DEV_PROBED; + break; + } + ndp->pending_req_num = 1; nca.type = NCSI_PKT_CMD_SP; @@ -1484,13 +1507,8 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp) if (ret) goto error; - /* Probe next package */ + /* Probe next package after receiving response */ ndp->package_probe_id++; - if (ndp->package_probe_id >= 8) { - /* Probe finished */ - ndp->flags |= NCSI_DEV_PROBED; - break; - } nd->state = ncsi_dev_state_probe_package; ndp->active_package = NULL; break; diff --git a/net/ncsi/ncsi-pkt.h b/net/ncsi/ncsi-pkt.h index c9d1da34dc4d..f2f3b5c1b941 100644 --- 
a/net/ncsi/ncsi-pkt.h +++ b/net/ncsi/ncsi-pkt.h @@ -338,6 +338,14 @@ struct ncsi_rsp_gpuuid_pkt { __be32 checksum; }; +/* Get MC MAC Address */ +struct ncsi_rsp_gmcma_pkt { + struct ncsi_rsp_pkt_hdr rsp; + unsigned char address_count; + unsigned char reserved[3]; + unsigned char addresses[][ETH_ALEN]; +}; + /* AEN: Link State Change */ struct ncsi_aen_lsc_pkt { struct ncsi_aen_pkt_hdr aen; /* AEN header */ @@ -398,6 +406,7 @@ struct ncsi_aen_hncdsc_pkt { #define NCSI_PKT_CMD_GPUUID 0x52 /* Get package UUID */ #define NCSI_PKT_CMD_QPNPR 0x56 /* Query Pending NC PLDM request */ #define NCSI_PKT_CMD_SNPR 0x57 /* Send NC PLDM Reply */ +#define NCSI_PKT_CMD_GMCMA 0x58 /* Get MC MAC Address */ /* NCSI packet responses */ @@ -433,6 +442,7 @@ struct ncsi_aen_hncdsc_pkt { #define NCSI_PKT_RSP_GPUUID (NCSI_PKT_CMD_GPUUID + 0x80) #define NCSI_PKT_RSP_QPNPR (NCSI_PKT_CMD_QPNPR + 0x80) #define NCSI_PKT_RSP_SNPR (NCSI_PKT_CMD_SNPR + 0x80) +#define NCSI_PKT_RSP_GMCMA (NCSI_PKT_CMD_GMCMA + 0x80) /* NCSI response code/reason */ #define NCSI_PKT_RSP_C_COMPLETED 0x0000 /* Command Completed */ diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index f22d67cb04d3..4a8ce2949fae 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c @@ -628,16 +628,14 @@ static int ncsi_rsp_handler_snfc(struct ncsi_request *nr) static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id) { struct ncsi_dev_priv *ndp = nr->ndp; + struct sockaddr *saddr = &ndp->pending_mac; struct net_device *ndev = ndp->ndev.dev; struct ncsi_rsp_oem_pkt *rsp; - struct sockaddr saddr; u32 mac_addr_off = 0; - int ret = 0; /* Get the response header */ rsp = (struct ncsi_rsp_oem_pkt *)skb_network_header(nr->rsp); - saddr.sa_family = ndev->type; ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; if (mfr_id == NCSI_OEM_MFR_BCM_ID) mac_addr_off = BCM_MAC_ADDR_OFFSET; @@ -646,22 +644,17 @@ static int ncsi_rsp_handler_oem_gma(struct ncsi_request *nr, int mfr_id) else if (mfr_id == NCSI_OEM_MFR_INTEL_ID) mac_addr_off = INTEL_MAC_ADDR_OFFSET; - memcpy(saddr.sa_data, &rsp->data[mac_addr_off], ETH_ALEN); + saddr->sa_family = ndev->type; + memcpy(saddr->sa_data, &rsp->data[mac_addr_off], ETH_ALEN); if (mfr_id == NCSI_OEM_MFR_BCM_ID || mfr_id == NCSI_OEM_MFR_INTEL_ID) - eth_addr_inc((u8 *)saddr.sa_data); - if (!is_valid_ether_addr((const u8 *)saddr.sa_data)) + eth_addr_inc((u8 *)saddr->sa_data); + if (!is_valid_ether_addr((const u8 *)saddr->sa_data)) return -ENXIO; /* Set the flag for GMA command which should only be called once */ ndp->gma_flag = 1; - rtnl_lock(); - ret = dev_set_mac_address(ndev, &saddr, NULL); - rtnl_unlock(); - if (ret < 0) - netdev_warn(ndev, "NCSI: 'Writing mac address to device failed\n"); - - return ret; + return 0; } /* Response handler for Mellanox card */ @@ -1093,6 +1086,42 @@ static int ncsi_rsp_handler_netlink(struct ncsi_request *nr) return ret; } +static int ncsi_rsp_handler_gmcma(struct ncsi_request *nr) +{ + struct ncsi_dev_priv *ndp = nr->ndp; + struct sockaddr *saddr = &ndp->pending_mac; + struct net_device *ndev = ndp->ndev.dev; + struct ncsi_rsp_gmcma_pkt *rsp; + int i; + + rsp = (struct ncsi_rsp_gmcma_pkt *)skb_network_header(nr->rsp); + ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + netdev_info(ndev, "NCSI: Received %d provisioned MAC addresses\n", + rsp->address_count); + for (i = 0; i < rsp->address_count; i++) { + netdev_info(ndev, "NCSI: MAC address %d: %02x:%02x:%02x:%02x:%02x:%02x\n", + i, rsp->addresses[i][0], rsp->addresses[i][1], + rsp->addresses[i][2], rsp->addresses[i][3], + rsp->addresses[i][4], 
rsp->addresses[i][5]); + } + + saddr->sa_family = ndev->type; + for (i = 0; i < rsp->address_count; i++) { + if (!is_valid_ether_addr(rsp->addresses[i])) { + netdev_warn(ndev, "NCSI: Unable to assign %pM to device\n", + rsp->addresses[i]); + continue; + } + memcpy(saddr->sa_data, rsp->addresses[i], ETH_ALEN); + netdev_warn(ndev, "NCSI: Will set MAC address to %pM\n", saddr->sa_data); + break; + } + + ndp->gma_flag = 1; + return 0; +} + static struct ncsi_rsp_handler { unsigned char type; int payload; @@ -1129,7 +1158,8 @@ static struct ncsi_rsp_handler { { NCSI_PKT_RSP_PLDM, -1, ncsi_rsp_handler_pldm }, { NCSI_PKT_RSP_GPUUID, 20, ncsi_rsp_handler_gpuuid }, { NCSI_PKT_RSP_QPNPR, -1, ncsi_rsp_handler_pldm }, - { NCSI_PKT_RSP_SNPR, -1, ncsi_rsp_handler_pldm } + { NCSI_PKT_RSP_SNPR, -1, ncsi_rsp_handler_pldm }, + { NCSI_PKT_RSP_GMCMA, -1, ncsi_rsp_handler_gmcma }, }; int ncsi_rcv_rsp(struct sk_buff *skb, struct net_device *dev, diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c index e4fa00abde6a..5988b9bb9029 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ip.c +++ b/net/netfilter/ipset/ip_set_bitmap_ip.c @@ -163,11 +163,8 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; - if (ip > ip_to) { + if (ip > ip_to) swap(ip, ip_to); - if (ip < map->first_ip) - return -IPSET_ERR_BITMAP_RANGE; - } } else if (tb[IPSET_ATTR_CIDR]) { u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); @@ -178,7 +175,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], ip_to = ip; } - if (ip_to > map->last_ip) + if (ip < map->first_ip || ip_to > map->last_ip) return -IPSET_ERR_BITMAP_RANGE; for (; !before(ip_to, ip); ip += map->hosts) { diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 0b24b638bfd2..56215fb63b64 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -104,14 +104,19 @@ find_set_type(const char *name, u8 family, u8 revision) static bool load_settype(const char *name) { + if (!try_module_get(THIS_MODULE)) + return false; + nfnl_unlock(NFNL_SUBSYS_IPSET); pr_debug("try to load ip_set_%s\n", name); if (request_module("ip_set_%s", name) < 0) { pr_warn("Can't find ip_set type %s\n", name); nfnl_lock(NFNL_SUBSYS_IPSET); + module_put(THIS_MODULE); return false; } nfnl_lock(NFNL_SUBSYS_IPSET); + module_put(THIS_MODULE); return true; } diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h index ef04e556aadb..0bd6bf46f05f 100644 --- a/net/netfilter/ipset/ip_set_hash_gen.h +++ b/net/netfilter/ipset/ip_set_hash_gen.h @@ -63,7 +63,7 @@ struct hbucket { #define ahash_sizeof_regions(htable_bits) \ (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region)) #define ahash_region(n, htable_bits) \ - ((n) % ahash_numof_locks(htable_bits)) + ((n) / jhash_size(HTABLE_REGION_BITS)) #define ahash_bucket_start(h, htable_bits) \ ((htable_bits) < HTABLE_REGION_BITS ? 
0 \ : (h) * jhash_size(HTABLE_REGION_BITS)) diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 902ff2f3bc72..5cc35b553a04 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -611,6 +611,8 @@ init_list_set(struct net *net, struct ip_set *set, u32 size) return true; } +static struct lock_class_key list_set_lockdep_key; + static int list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[], u32 flags) @@ -627,6 +629,7 @@ list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[], if (size < IP_SET_LIST_MIN_SIZE) size = IP_SET_LIST_MIN_SIZE; + lockdep_set_class(&set->lock, &list_set_lockdep_key); set->variant = &set_variant; set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem), __alignof__(struct set_elem)); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 17a1b731a76b..6cc50f05c46c 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1382,18 +1382,18 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, sched = NULL; } - /* Bind the ct retriever */ - RCU_INIT_POINTER(svc->pe, pe); - pe = NULL; - /* Update the virtual service counters */ if (svc->port == FTPPORT) atomic_inc(&ipvs->ftpsvc_counter); else if (svc->port == 0) atomic_inc(&ipvs->nullsvc_counter); - if (svc->pe && svc->pe->conn_out) + if (pe && pe->conn_out) atomic_inc(&ipvs->conn_out_counter); + /* Bind the ct retriever */ + RCU_INIT_POINTER(svc->pe, pe); + pe = NULL; + ip_vs_start_estimator(ipvs, &svc->stats); /* Count only IPv4 services for old get/setsockopt interface */ @@ -2854,12 +2854,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) case IP_VS_SO_GET_SERVICES: { struct ip_vs_get_services *get; - int size; + size_t size; get = (struct ip_vs_get_services *)arg; size = struct_size(get, entrytable, get->num_services); if (*len != size) { - pr_err("length: %u != %u\n", *len, size); + pr_err("length: %u != %zu\n", *len, size); ret = -EINVAL; goto out; } @@ -2895,12 +2895,12 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) case IP_VS_SO_GET_DESTS: { struct ip_vs_get_dests *get; - int size; + size_t size; get = (struct ip_vs_get_dests *)arg; size = struct_size(get, entrytable, get->num_dests); if (*len != size) { - pr_err("length: %u != %u\n", *len, size); + pr_err("length: %u != %zu\n", *len, size); ret = -EINVAL; goto out; } diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index f100da4ba3bc..a9fd1d3fc2cb 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -340,7 +340,7 @@ void __net_exit ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs) int __init ip_vs_protocol_init(void) { - char protocols[64]; + char protocols[64] = { 0 }; #define REGISTER_PROTOCOL(p) \ do { \ register_ip_vs_protocol(p); \ @@ -348,8 +348,6 @@ int __init ip_vs_protocol_init(void) strcat(protocols, (p)->name); \ } while (0) - protocols[0] = '\0'; - protocols[2] = '\0'; #ifdef CONFIG_IP_VS_PROTO_TCP REGISTER_PROTOCOL(&ip_vs_protocol_tcp); #endif diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index d40a4ca2b27f..e1437c72ca6e 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -119,13 +119,12 @@ __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu) return false; } -/* Get route to daddr, update *saddr, optionally bind route to saddr */ +/* Get route to 
daddr, optionally bind route to saddr */ static struct rtable *do_output_route4(struct net *net, __be32 daddr, - int rt_mode, __be32 *saddr) + int rt_mode, __be32 *ret_saddr) { struct flowi4 fl4; struct rtable *rt; - bool loop = false; memset(&fl4, 0, sizeof(fl4)); fl4.daddr = daddr; @@ -135,23 +134,17 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr, retry: rt = ip_route_output_key(net, &fl4); if (IS_ERR(rt)) { - /* Invalid saddr ? */ - if (PTR_ERR(rt) == -EINVAL && *saddr && - rt_mode & IP_VS_RT_MODE_CONNECT && !loop) { - *saddr = 0; - flowi4_update_output(&fl4, 0, 0, daddr, 0); - goto retry; - } IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr); return NULL; - } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) { + } + if (rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) { ip_rt_put(rt); - *saddr = fl4.saddr; - flowi4_update_output(&fl4, 0, 0, daddr, fl4.saddr); - loop = true; + flowi4_update_output(&fl4, 0, daddr, fl4.saddr); + rt_mode = 0; goto retry; } - *saddr = fl4.saddr; + if (ret_saddr) + *ret_saddr = fl4.saddr; return rt; } @@ -344,19 +337,15 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, if (ret_saddr) *ret_saddr = dest_dst->dst_saddr.ip; } else { - __be32 saddr = htonl(INADDR_ANY); - noref = 0; /* For such unconfigured boxes avoid many route lookups * for performance reasons because we do not remember saddr */ rt_mode &= ~IP_VS_RT_MODE_CONNECT; - rt = do_output_route4(net, daddr, rt_mode, &saddr); + rt = do_output_route4(net, daddr, rt_mode, ret_saddr); if (!rt) goto err_unreach; - if (ret_saddr) - *ret_saddr = saddr; } local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0; diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index 5885810da412..6156c0751056 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -132,7 +132,7 @@ static int __nf_conncount_add(struct net *net, struct nf_conn *found_ct; unsigned int collect = 0; - if (time_is_after_eq_jiffies((unsigned long)list->last_gc)) + if ((u32)jiffies == list->last_gc) goto add_new_node; /* check the saved connections */ @@ -234,7 +234,7 @@ bool nf_conncount_gc_list(struct net *net, bool ret = false; /* don't bother if we just did GC */ - if (time_is_after_eq_jiffies((unsigned long)READ_ONCE(list->last_gc))) + if ((u32)jiffies == READ_ONCE(list->last_gc)) return false; /* don't bother if other cpu is already doing GC */ @@ -377,6 +377,8 @@ restart: conn->tuple = *tuple; conn->zone = *zone; + conn->cpu = raw_smp_processor_id(); + conn->jiffies32 = (u32)jiffies; memcpy(rbconn->key, key, sizeof(u32) * data->keylen); nf_conncount_list_init(&rbconn->list); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 024f93fc8c0b..ec4c39641089 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1770,7 +1770,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl, cnet = nf_ct_pernet(net); if (cnet->expect_count) { spin_lock_bh(&nf_conntrack_expect_lock); - exp = nf_ct_find_expectation(net, zone, tuple); + exp = nf_ct_find_expectation(net, zone, tuple, !tmpl || nf_ct_is_confirmed(tmpl)); if (exp) { pr_debug("expectation arrives ct=%p exp=%p\n", ct, exp); @@ -2591,12 +2591,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) struct hlist_nulls_head *hash; unsigned int nr_slots, i; - if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head))) + if (*sizep > (INT_MAX / sizeof(struct hlist_nulls_head))) return NULL; BUILD_BUG_ON(sizeof(struct 
hlist_nulls_head) != sizeof(struct hlist_head)); nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); + if (nr_slots > (INT_MAX / sizeof(struct hlist_nulls_head))) + return NULL; + hash = kvcalloc(nr_slots, sizeof(struct hlist_nulls_head), GFP_KERNEL); if (hash && nulls) diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 96948e98ec53..81ca348915c9 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -171,7 +171,7 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_find_get); struct nf_conntrack_expect * nf_ct_find_expectation(struct net *net, const struct nf_conntrack_zone *zone, - const struct nf_conntrack_tuple *tuple) + const struct nf_conntrack_tuple *tuple, bool unlink) { struct nf_conntrack_net *cnet = nf_ct_pernet(net); struct nf_conntrack_expect *i, *exp = NULL; @@ -211,7 +211,7 @@ nf_ct_find_expectation(struct net *net, !refcount_inc_not_zero(&exp->master->ct_general.use))) return NULL; - if (exp->flags & NF_CT_EXPECT_PERMANENT) { + if (exp->flags & NF_CT_EXPECT_PERMANENT || !unlink) { refcount_inc(&exp->use); return exp; } else if (del_timer(&exp->timeout)) { diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 28d957860dda..2a921b3d6438 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -631,7 +631,9 @@ static struct ctl_table nf_ct_sysctl_table[] = { .data = &nf_conntrack_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX, }, [NF_SYSCTL_CT_COUNT] = { .procname = "nf_conntrack_count", @@ -667,7 +669,9 @@ static struct ctl_table nf_ct_sysctl_table[] = { .data = &nf_ct_expect_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ONE, + .extra2 = SYSCTL_INT_MAX, }, [NF_SYSCTL_CT_ACCT] = { .procname = "nf_conntrack_acct", @@ -970,7 +974,9 @@ static struct ctl_table nf_ct_netfilter_table[] = { .data = &nf_conntrack_max, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_INT_MAX, }, { } }; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index ed09b1fdda16..0bf347a0a1dd 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2034,9 +2034,9 @@ static void nf_tables_chain_free_chain_rules(struct nft_chain *chain) kvfree(chain->blob_next); } -void nf_tables_chain_destroy(struct nft_ctx *ctx) +void nf_tables_chain_destroy(struct nft_chain *chain) { - struct nft_chain *chain = ctx->chain; + const struct nft_table *table = chain->table; struct nft_hook *hook, *next; if (WARN_ON(chain->use > 0)) @@ -2048,7 +2048,7 @@ void nf_tables_chain_destroy(struct nft_ctx *ctx) if (nft_is_base_chain(chain)) { struct nft_base_chain *basechain = nft_base_chain(chain); - if (nft_base_chain_netdev(ctx->family, basechain->ops.hooknum)) { + if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) { list_for_each_entry_safe(hook, next, &basechain->hook_list, list) { list_del_rcu(&hook->list); @@ -2515,7 +2515,7 @@ err_chain_add: err_trans: nft_use_dec_restore(&table->use); err_destroy_chain: - nf_tables_chain_destroy(ctx); + nf_tables_chain_destroy(chain); return err; } @@ -3510,8 +3510,11 @@ void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule 
*rule) kfree(rule); } +/* can only be used if rule is no longer visible to dumps */ static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule) { + lockdep_commit_lock_is_held(ctx->net); + nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE); nf_tables_rule_destroy(ctx, rule); } @@ -4567,7 +4570,7 @@ static int nft_set_desc_concat_parse(const struct nlattr *attr, static int nft_set_desc_concat(struct nft_set_desc *desc, const struct nlattr *nla) { - u32 num_regs = 0, key_num_regs = 0; + u32 len = 0, num_regs; struct nlattr *attr; int rem, err, i; @@ -4581,12 +4584,12 @@ static int nft_set_desc_concat(struct nft_set_desc *desc, } for (i = 0; i < desc->field_count; i++) - num_regs += DIV_ROUND_UP(desc->field_len[i], sizeof(u32)); + len += round_up(desc->field_len[i], sizeof(u32)); - key_num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32)); - if (key_num_regs != num_regs) + if (len != desc->klen) return -EINVAL; + num_regs = DIV_ROUND_UP(desc->klen, sizeof(u32)); if (num_regs > NFT_REG32_COUNT) return -E2BIG; @@ -5247,6 +5250,8 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding, enum nft_trans_phase phase) { + lockdep_commit_lock_is_held(ctx->net); + switch (phase) { case NFT_TRANS_PREPARE_ERROR: nft_set_trans_unbind(ctx, set); @@ -7239,9 +7244,7 @@ static int nf_tables_updobj(const struct nft_ctx *ctx, struct nft_trans *trans; int err = -ENOMEM; - if (!try_module_get(type->owner)) - return -ENOENT; - + /* caller must have obtained type->owner reference. */ trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ, sizeof(struct nft_trans_obj)); if (!trans) @@ -7309,12 +7312,16 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info, if (info->nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; - type = __nft_obj_type_get(objtype, family); - if (WARN_ON_ONCE(!type)) - return -ENOENT; + if (!obj->ops->update) + return 0; + + type = nft_obj_type_get(net, objtype, family); + if (WARN_ON_ONCE(IS_ERR(type))) + return PTR_ERR(type); nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); + /* type->owner reference is put when transaction object is released. 
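
The comment above is the whole contract of the reworked object-update path: the caller takes the type->owner module reference via nft_obj_type_get() and the transaction release drops it. A short sketch of that general try_module_get()/module_put() ownership hand-off, using hypothetical helper names rather than the patch's own:

#include <linux/module.h>
#include <linux/errno.h>

/* Pin the module that owns an object type while an in-flight
 * transaction still points into it.
 */
static int example_pin_owner(struct module *owner)
{
	if (!try_module_get(owner))	/* owner is already unloading */
		return -ENOENT;
	return 0;
}

/* Drop the pin only when the transaction object is released. */
static void example_release_owner(struct module *owner)
{
	module_put(owner);		/* pairs with example_pin_owner() */
}
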
*/ return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj); } @@ -7977,6 +7984,7 @@ static void nft_unregister_flowtable_hook(struct net *net, } static void __nft_unregister_flowtable_net_hooks(struct net *net, + struct nft_flowtable *flowtable, struct list_head *hook_list, bool release_netdev) { @@ -7984,6 +7992,8 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net, list_for_each_entry_safe(hook, next, hook_list, list) { nf_unregister_net_hook(net, &hook->ops); + flowtable->data.type->setup(&flowtable->data, hook->ops.dev, + FLOW_BLOCK_UNBIND); if (release_netdev) { list_del(&hook->list); kfree_rcu(hook, rcu); @@ -7992,9 +8002,10 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net, } static void nft_unregister_flowtable_net_hooks(struct net *net, + struct nft_flowtable *flowtable, struct list_head *hook_list) { - __nft_unregister_flowtable_net_hooks(net, hook_list, false); + __nft_unregister_flowtable_net_hooks(net, flowtable, hook_list, false); } static int nft_register_flowtable_net_hooks(struct net *net, @@ -8616,8 +8627,6 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable) flowtable->data.type->free(&flowtable->data); list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) { - flowtable->data.type->setup(&flowtable->data, hook->ops.dev, - FLOW_BLOCK_UNBIND); list_del_rcu(&hook->list); kfree_rcu(hook, rcu); } @@ -8972,9 +8981,10 @@ static void nft_obj_commit_update(struct nft_trans *trans) obj = nft_trans_obj(trans); newobj = nft_trans_obj_newobj(trans); - if (obj->ops->update) - obj->ops->update(obj, newobj); + if (WARN_ON_ONCE(!obj->ops->update)) + return; + obj->ops->update(obj, newobj); nft_obj_destroy(&trans->ctx, newobj); } @@ -8989,7 +8999,7 @@ static void nft_commit_release(struct nft_trans *trans) kfree(nft_trans_chain_name(trans)); break; case NFT_MSG_DELCHAIN: - nf_tables_chain_destroy(&trans->ctx); + nf_tables_chain_destroy(nft_trans_chain(trans)); break; case NFT_MSG_DELRULE: nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); @@ -9899,6 +9909,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) &nft_trans_flowtable_hooks(trans), NFT_MSG_DELFLOWTABLE); nft_unregister_flowtable_net_hooks(net, + nft_trans_flowtable(trans), &nft_trans_flowtable_hooks(trans)); } else { list_del_rcu(&nft_trans_flowtable(trans)->list); @@ -9907,6 +9918,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) &nft_trans_flowtable(trans)->hook_list, NFT_MSG_DELFLOWTABLE); nft_unregister_flowtable_net_hooks(net, + nft_trans_flowtable(trans), &nft_trans_flowtable(trans)->hook_list); } break; @@ -9948,7 +9960,7 @@ static void nf_tables_abort_release(struct nft_trans *trans) nf_tables_table_destroy(&trans->ctx); break; case NFT_MSG_NEWCHAIN: - nf_tables_chain_destroy(&trans->ctx); + nf_tables_chain_destroy(nft_trans_chain(trans)); break; case NFT_MSG_NEWRULE: nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); @@ -10137,11 +10149,13 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) case NFT_MSG_NEWFLOWTABLE: if (nft_trans_flowtable_update(trans)) { nft_unregister_flowtable_net_hooks(net, + nft_trans_flowtable(trans), &nft_trans_flowtable_hooks(trans)); } else { nft_use_dec_restore(&trans->ctx.table->use); list_del_rcu(&nft_trans_flowtable(trans)->list); nft_unregister_flowtable_net_hooks(net, + nft_trans_flowtable(trans), &nft_trans_flowtable(trans)->hook_list); } break; @@ -10653,23 +10667,43 @@ int nft_data_dump(struct sk_buff *skb, int attr, const 
struct nft_data *data, } EXPORT_SYMBOL_GPL(nft_data_dump); -int __nft_release_basechain(struct nft_ctx *ctx) +static void __nft_release_basechain_now(struct nft_ctx *ctx) { struct nft_rule *rule, *nr; - if (WARN_ON(!nft_is_base_chain(ctx->chain))) + list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { + list_del(&rule->list); + nf_tables_rule_release(ctx, rule); + } + nf_tables_chain_destroy(ctx->chain); +} + +int __nft_release_basechain(struct nft_ctx *ctx) +{ + struct nft_rule *rule; + + if (WARN_ON_ONCE(!nft_is_base_chain(ctx->chain))) return 0; nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain); - list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { - list_del(&rule->list); + list_for_each_entry(rule, &ctx->chain->rules, list) nft_use_dec(&ctx->chain->use); - nf_tables_rule_release(ctx, rule); - } + nft_chain_del(ctx->chain); nft_use_dec(&ctx->table->use); - nf_tables_chain_destroy(ctx); + if (!maybe_get_net(ctx->net)) { + __nft_release_basechain_now(ctx); + return 0; + } + + /* wait for ruleset dumps to complete. Owning chain is no longer in + * lists, so new dumps can't find any of these rules anymore. + */ + synchronize_rcu(); + + __nft_release_basechain_now(ctx); + put_net(ctx->net); return 0; } EXPORT_SYMBOL_GPL(__nft_release_basechain); @@ -10682,7 +10716,8 @@ static void __nft_release_hook(struct net *net, struct nft_table *table) list_for_each_entry(chain, &table->chains, list) __nf_tables_unregister_hook(net, table, chain, true); list_for_each_entry(flowtable, &table->flowtables, list) - __nft_unregister_flowtable_net_hooks(net, &flowtable->hook_list, + __nft_unregister_flowtable_net_hooks(net, flowtable, + &flowtable->hook_list, true); } @@ -10743,10 +10778,9 @@ static void __nft_release_table(struct net *net, struct nft_table *table) nft_obj_destroy(&ctx, obj); } list_for_each_entry_safe(chain, nc, &table->chains, list) { - ctx.chain = chain; nft_chain_del(chain); nft_use_dec(&table->use); - nf_tables_chain_destroy(&ctx); + nf_tables_chain_destroy(chain); } nf_tables_table_destroy(&ctx); } diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c index 781d3a26f5df..8d19bd001277 100644 --- a/net/netfilter/nft_counter.c +++ b/net/netfilter/nft_counter.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include @@ -17,6 +17,11 @@ #include struct nft_counter { + u64_stats_t bytes; + u64_stats_t packets; +}; + +struct nft_counter_tot { s64 bytes; s64 packets; }; @@ -25,25 +30,24 @@ struct nft_counter_percpu_priv { struct nft_counter __percpu *counter; }; -static DEFINE_PER_CPU(seqcount_t, nft_counter_seq); +static DEFINE_PER_CPU(struct u64_stats_sync, nft_counter_sync); static inline void nft_counter_do_eval(struct nft_counter_percpu_priv *priv, struct nft_regs *regs, const struct nft_pktinfo *pkt) { + struct u64_stats_sync *nft_sync; struct nft_counter *this_cpu; - seqcount_t *myseq; local_bh_disable(); this_cpu = this_cpu_ptr(priv->counter); - myseq = this_cpu_ptr(&nft_counter_seq); + nft_sync = this_cpu_ptr(&nft_counter_sync); - write_seqcount_begin(myseq); + u64_stats_update_begin(nft_sync); + u64_stats_add(&this_cpu->bytes, pkt->skb->len); + u64_stats_inc(&this_cpu->packets); + u64_stats_update_end(nft_sync); - this_cpu->bytes += pkt->skb->len; - this_cpu->packets++; - - write_seqcount_end(myseq); local_bh_enable(); } @@ -66,17 +70,16 @@ static int nft_counter_do_init(const struct nlattr * const tb[], if (cpu_stats == NULL) return -ENOMEM; - preempt_disable(); - this_cpu = 
this_cpu_ptr(cpu_stats); + this_cpu = raw_cpu_ptr(cpu_stats); if (tb[NFTA_COUNTER_PACKETS]) { - this_cpu->packets = - be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS])); + u64_stats_set(&this_cpu->packets, + be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]))); } if (tb[NFTA_COUNTER_BYTES]) { - this_cpu->bytes = - be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES])); + u64_stats_set(&this_cpu->bytes, + be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]))); } - preempt_enable(); + priv->counter = cpu_stats; return 0; } @@ -104,40 +107,41 @@ static void nft_counter_obj_destroy(const struct nft_ctx *ctx, } static void nft_counter_reset(struct nft_counter_percpu_priv *priv, - struct nft_counter *total) + struct nft_counter_tot *total) { + struct u64_stats_sync *nft_sync; struct nft_counter *this_cpu; - seqcount_t *myseq; local_bh_disable(); this_cpu = this_cpu_ptr(priv->counter); - myseq = this_cpu_ptr(&nft_counter_seq); + nft_sync = this_cpu_ptr(&nft_counter_sync); + + u64_stats_update_begin(nft_sync); + u64_stats_add(&this_cpu->packets, -total->packets); + u64_stats_add(&this_cpu->bytes, -total->bytes); + u64_stats_update_end(nft_sync); - write_seqcount_begin(myseq); - this_cpu->packets -= total->packets; - this_cpu->bytes -= total->bytes; - write_seqcount_end(myseq); local_bh_enable(); } static void nft_counter_fetch(struct nft_counter_percpu_priv *priv, - struct nft_counter *total) + struct nft_counter_tot *total) { struct nft_counter *this_cpu; - const seqcount_t *myseq; u64 bytes, packets; unsigned int seq; int cpu; memset(total, 0, sizeof(*total)); for_each_possible_cpu(cpu) { - myseq = per_cpu_ptr(&nft_counter_seq, cpu); + struct u64_stats_sync *nft_sync = per_cpu_ptr(&nft_counter_sync, cpu); + this_cpu = per_cpu_ptr(priv->counter, cpu); do { - seq = read_seqcount_begin(myseq); - bytes = this_cpu->bytes; - packets = this_cpu->packets; - } while (read_seqcount_retry(myseq, seq)); + seq = u64_stats_fetch_begin(nft_sync); + bytes = u64_stats_read(&this_cpu->bytes); + packets = u64_stats_read(&this_cpu->packets); + } while (u64_stats_fetch_retry(nft_sync, seq)); total->bytes += bytes; total->packets += packets; @@ -148,7 +152,7 @@ static int nft_counter_do_dump(struct sk_buff *skb, struct nft_counter_percpu_priv *priv, bool reset) { - struct nft_counter total; + struct nft_counter_tot total; nft_counter_fetch(priv, &total); @@ -236,7 +240,7 @@ static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src, g struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst); struct nft_counter __percpu *cpu_stats; struct nft_counter *this_cpu; - struct nft_counter total; + struct nft_counter_tot total; nft_counter_fetch(priv, &total); @@ -244,11 +248,9 @@ static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src, g if (cpu_stats == NULL) return -ENOMEM; - preempt_disable(); - this_cpu = this_cpu_ptr(cpu_stats); - this_cpu->packets = total.packets; - this_cpu->bytes = total.bytes; - preempt_enable(); + this_cpu = raw_cpu_ptr(cpu_stats); + u64_stats_set(&this_cpu->packets, total.packets); + u64_stats_set(&this_cpu->bytes, total.bytes); priv_clone->counter = cpu_stats; return 0; @@ -266,17 +268,17 @@ static void nft_counter_offload_stats(struct nft_expr *expr, const struct flow_stats *stats) { struct nft_counter_percpu_priv *priv = nft_expr_priv(expr); + struct u64_stats_sync *nft_sync; struct nft_counter *this_cpu; - seqcount_t *myseq; local_bh_disable(); this_cpu = this_cpu_ptr(priv->counter); - myseq = this_cpu_ptr(&nft_counter_seq); + nft_sync = 
this_cpu_ptr(&nft_counter_sync); - write_seqcount_begin(myseq); - this_cpu->packets += stats->pkts; - this_cpu->bytes += stats->bytes; - write_seqcount_end(myseq); + u64_stats_update_begin(nft_sync); + u64_stats_add(&this_cpu->packets, stats->pkts); + u64_stats_add(&this_cpu->bytes, stats->bytes); + u64_stats_update_end(nft_sync); local_bh_enable(); } @@ -285,7 +287,7 @@ void nft_counter_init_seqcount(void) int cpu; for_each_possible_cpu(cpu) - seqcount_init(per_cpu_ptr(&nft_counter_seq, cpu)); + u64_stats_init(per_cpu_ptr(&nft_counter_sync, cpu)); } struct nft_expr_type nft_counter_type; diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 2bfe3cdfbd58..3641043ca8cc 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -240,6 +240,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr, enum ip_conntrack_info ctinfo; u16 value = nft_reg_load16(®s->data[priv->sreg]); struct nf_conn *ct; + int oldcnt; ct = nf_ct_get(skb, &ctinfo); if (ct) /* already tracked */ @@ -260,10 +261,11 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr, ct = this_cpu_read(nft_ct_pcpu_template); - if (likely(refcount_read(&ct->ct_general.use) == 1)) { - refcount_inc(&ct->ct_general.use); + __refcount_inc(&ct->ct_general.use, &oldcnt); + if (likely(oldcnt == 1)) { nf_ct_zone_add(ct, &zone); } else { + refcount_dec(&ct->ct_general.use); /* previous skb got queued to userspace, allocate temporary * one until percpu template can be reused. */ @@ -272,6 +274,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr, regs->verdict.code = NF_DROP; return; } + __set_bit(IPS_CONFIRMED_BIT, &ct->status); } nf_ct_set(skb, ct, IP_CT_NEW); @@ -378,6 +381,7 @@ static bool nft_ct_tmpl_alloc_pcpu(void) return false; } + __set_bit(IPS_CONFIRMED_BIT, &tmp->status); per_cpu(nft_ct_pcpu_template, cpu) = tmp; } diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index de588f7b69c4..60d18bd60d82 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c @@ -86,7 +86,6 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, unsigned char optbuf[sizeof(struct ip_options) + 40]; struct ip_options *opt = (struct ip_options *)optbuf; struct iphdr *iph, _iph; - unsigned int start; bool found = false; __be32 info; int optlen; @@ -94,7 +93,6 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph); if (!iph) return -EBADMSG; - start = sizeof(struct iphdr); optlen = iph->ihl * 4 - (int)sizeof(struct iphdr); if (optlen <= 0) @@ -104,7 +102,7 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, /* Copy the options since __ip_options_compile() modifies * the options. */ - if (skb_copy_bits(skb, start, opt->__data, optlen)) + if (skb_copy_bits(skb, sizeof(struct iphdr), opt->__data, optlen)) return -EBADMSG; opt->optlen = optlen; @@ -119,18 +117,18 @@ static int ipv4_find_option(struct net *net, struct sk_buff *skb, found = target == IPOPT_SSRR ? 
opt->is_strictroute : !opt->is_strictroute; if (found) - *offset = opt->srr + start; + *offset = opt->srr; break; case IPOPT_RR: if (!opt->rr) break; - *offset = opt->rr + start; + *offset = opt->rr; found = true; break; case IPOPT_RA: if (!opt->router_alert) break; - *offset = opt->router_alert + start; + *offset = opt->router_alert; found = true; break; default: diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index 7a8707632a81..9d335aa58907 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -288,6 +288,15 @@ static bool nft_flow_offload_skip(struct sk_buff *skb, int family) return false; } +static void flow_offload_ct_tcp(struct nf_conn *ct) +{ + /* conntrack will not see all packets, disable tcp window validation. */ + spin_lock_bh(&ct->lock); + ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; + ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; + spin_unlock_bh(&ct->lock); +} + static void nft_flow_offload_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@ -355,11 +364,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr, goto err_flow_alloc; flow_offload_route_init(flow, &route); - - if (tcph) { - ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; - ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL; - } + if (tcph) + flow_offload_ct_tcp(ct); ret = flow_offload_add(flowtable, flow); if (ret < 0) diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 55fcf0280c5c..731511d58b7c 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -221,7 +221,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, list_del(&rule->list); nf_tables_rule_destroy(&chain_ctx, rule); } - nf_tables_chain_destroy(&chain_ctx); + nf_tables_chain_destroy(chain); break; default: break; diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 1fd3b413350d..a592cca7a61f 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -24,10 +24,12 @@ struct nft_rhash { struct rhashtable ht; struct delayed_work gc_work; + u32 wq_gc_seq; }; struct nft_rhash_elem { struct rhash_head node; + u32 wq_gc_seq; struct nft_set_ext ext; }; @@ -308,7 +310,8 @@ static bool nft_rhash_expr_needs_gc_run(const struct nft_set *set, nft_setelem_expr_foreach(expr, elem_expr, size) { if (expr->ops->gc && - expr->ops->gc(read_pnet(&set->net), expr)) + expr->ops->gc(read_pnet(&set->net), expr) && + set->flags & NFT_SET_EVAL) return true; } @@ -339,6 +342,10 @@ static void nft_rhash_gc(struct work_struct *work) if (!gc) goto done; + /* Elements never collected use a zero gc worker sequence number. */ + if (unlikely(++priv->wq_gc_seq == 0)) + priv->wq_gc_seq++; + rhashtable_walk_enter(&priv->ht, &hti); rhashtable_walk_start(&hti); @@ -356,6 +363,14 @@ static void nft_rhash_gc(struct work_struct *work) goto try_later; } + /* rhashtable walk is unstable, already seen in this gc run? + * Then, skip this element. In case of (unlikely) sequence + * wraparound and stale element wq_gc_seq, next gc run will + * just find this expired element. + */ + if (he->wq_gc_seq == priv->wq_gc_seq) + continue; + if (nft_set_elem_is_dead(&he->ext)) goto dead_elem; @@ -372,6 +387,8 @@ dead_elem: if (!gc) goto try_later; + /* annotate gc sequence for this attempt. 
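
The nft_counter conversion a little further up replaces the per-CPU seqcount with the u64_stats API, which compiles away on 64-bit and degrades to a seqlock only on 32-bit. A standalone sketch of that update/fetch pattern, with invented names; on 32-bit each per-CPU u64_stats_sync still needs u64_stats_init() at setup time, as the patch does in nft_counter_init_seqcount():

#include <linux/u64_stats_sync.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/bottom_half.h>

struct example_counter {
	u64_stats_t bytes;
	u64_stats_t packets;
};

static DEFINE_PER_CPU(struct u64_stats_sync, example_sync);

static void example_counter_add(struct example_counter __percpu *counters,
				unsigned int len)
{
	struct u64_stats_sync *sync;
	struct example_counter *c;

	local_bh_disable();
	c = this_cpu_ptr(counters);
	sync = this_cpu_ptr(&example_sync);

	u64_stats_update_begin(sync);	/* writer side: no-op on 64-bit */
	u64_stats_add(&c->bytes, len);
	u64_stats_inc(&c->packets);
	u64_stats_update_end(sync);
	local_bh_enable();
}

static void example_counter_fetch(struct example_counter __percpu *counters,
				  u64 *bytes, u64 *packets)
{
	unsigned int seq;
	int cpu;

	*bytes = 0;
	*packets = 0;
	for_each_possible_cpu(cpu) {
		struct u64_stats_sync *sync = per_cpu_ptr(&example_sync, cpu);
		struct example_counter *c = per_cpu_ptr(counters, cpu);
		u64 b, p;

		do {	/* reader side: retry only if a 32-bit writer raced */
			seq = u64_stats_fetch_begin(sync);
			b = u64_stats_read(&c->bytes);
			p = u64_stats_read(&c->packets);
		} while (u64_stats_fetch_retry(sync, seq));

		*bytes += b;
		*packets += p;
	}
}
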
*/ + he->wq_gc_seq = priv->wq_gc_seq; nft_trans_gc_elem_add(gc, he); } diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c index b8d3c3213efe..c15db28c5ebc 100644 --- a/net/netfilter/nft_set_pipapo_avx2.c +++ b/net/netfilter/nft_set_pipapo_avx2.c @@ -994,8 +994,9 @@ static int nft_pipapo_avx2_lookup_8b_16(unsigned long *map, unsigned long *fill, NFT_PIPAPO_AVX2_BUCKET_LOAD8(5, lt, 8, pkt[8], bsize); NFT_PIPAPO_AVX2_AND(6, 2, 3); + NFT_PIPAPO_AVX2_AND(3, 4, 7); NFT_PIPAPO_AVX2_BUCKET_LOAD8(7, lt, 9, pkt[9], bsize); - NFT_PIPAPO_AVX2_AND(0, 4, 5); + NFT_PIPAPO_AVX2_AND(0, 3, 5); NFT_PIPAPO_AVX2_BUCKET_LOAD8(1, lt, 10, pkt[10], bsize); NFT_PIPAPO_AVX2_AND(2, 6, 7); NFT_PIPAPO_AVX2_BUCKET_LOAD8(3, lt, 11, pkt[11], bsize); diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c index 4148df6d6a47..2d33674e9e5e 100644 --- a/net/netfilter/nft_socket.c +++ b/net/netfilter/nft_socket.c @@ -68,7 +68,7 @@ static noinline int nft_socket_cgroup_subtree_level(void) cgroup_put(cgrp); - if (WARN_ON_ONCE(level > 255)) + if (level > 255) return -ERANGE; if (WARN_ON_ONCE(level < 0)) diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c index efb505445eac..d026982a00fc 100644 --- a/net/netfilter/nft_tunnel.c +++ b/net/netfilter/nft_tunnel.c @@ -333,13 +333,13 @@ static int nft_tunnel_obj_erspan_init(const struct nlattr *attr, static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = { [NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 }, [NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 }, - [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 }, + [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 127 }, }; static int nft_tunnel_obj_geneve_init(const struct nlattr *attr, struct nft_tunnel_opts *opts) { - struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len; + struct geneve_opt *opt = (struct geneve_opt *)(opts->u.data + opts->len); struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1]; int err, data_len; @@ -621,7 +621,7 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb, if (!inner) goto failure; while (opts->len > offset) { - opt = (struct geneve_opt *)opts->u.data + offset; + opt = (struct geneve_opt *)(opts->u.data + offset); if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS, opt->opt_class) || nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE, diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c index 211bfa2a2ac0..c2c10a536cc6 100644 --- a/net/netfilter/xt_LED.c +++ b/net/netfilter/xt_LED.c @@ -97,7 +97,9 @@ static int led_tg_check(const struct xt_tgchk_param *par) struct xt_led_info_internal *ledinternal; int err; - if (ledinfo->id[0] == '\0') + /* Bail out if empty string or not a string at all. 
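
The garbage-collection rework just above copes with the fact that an rhashtable walk is unstable by stamping every element it queues with the worker's per-run sequence number; zero is reserved for "never visited". The bookkeeping, reduced to a standalone sketch with invented type names:

#include <linux/types.h>
#include <linux/compiler.h>

struct example_gc_elem {
	u32 wq_gc_seq;		/* last run that queued this element, 0 = never */
};

struct example_gc_worker {
	u32 wq_gc_seq;		/* sequence number of the current run */
};

static void example_gc_begin_run(struct example_gc_worker *w)
{
	/* keep 0 reserved so untouched elements are never skipped */
	if (unlikely(++w->wq_gc_seq == 0))
		w->wq_gc_seq++;
}

static bool example_gc_already_seen(const struct example_gc_worker *w,
				    const struct example_gc_elem *e)
{
	return e->wq_gc_seq == w->wq_gc_seq;
}

static void example_gc_mark_seen(const struct example_gc_worker *w,
				 struct example_gc_elem *e)
{
	e->wq_gc_seq = w->wq_gc_seq;
}
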
*/ + if (ledinfo->id[0] == '\0' || + !memchr(ledinfo->id, '\0', sizeof(ledinfo->id))) return -EINVAL; mutex_lock(&xt_led_mutex); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 9eb87f35bc65..8a74847dacaf 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -393,15 +393,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) static void netlink_sock_destruct(struct sock *sk) { - struct netlink_sock *nlk = nlk_sk(sk); - - if (nlk->cb_running) { - if (nlk->cb.done) - nlk->cb.done(&nlk->cb); - module_put(nlk->cb.module); - kfree_skb(nlk->cb.skb); - } - skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { @@ -414,14 +405,6 @@ static void netlink_sock_destruct(struct sock *sk) WARN_ON(nlk_sk(sk)->groups); } -static void netlink_sock_destruct_work(struct work_struct *work) -{ - struct netlink_sock *nlk = container_of(work, struct netlink_sock, - work); - - sk_free(&nlk->sk); -} - /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on * SMP. Look, when several writers sleep and reader wakes them up, all but one * immediately hit write lock and grab all the cpus. Exclusive sleep solves @@ -736,12 +719,6 @@ static void deferred_put_nlk_sk(struct rcu_head *head) if (!refcount_dec_and_test(&sk->sk_refcnt)) return; - if (nlk->cb_running && nlk->cb.done) { - INIT_WORK(&nlk->work, netlink_sock_destruct_work); - schedule_work(&nlk->work); - return; - } - sk_free(sk); } @@ -791,6 +768,14 @@ static int netlink_release(struct socket *sock) NETLINK_URELEASE, &n); } + /* Terminate any outstanding dump */ + if (nlk->cb_running) { + if (nlk->cb.done) + nlk->cb.done(&nlk->cb); + module_put(nlk->cb.module); + kfree_skb(nlk->cb.skb); + } + module_put(nlk->module); if (netlink_is_kernel(sk)) { diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index b30b8fc760f7..aa430e4d58d8 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -4,7 +4,6 @@ #include #include -#include #include /* flags */ @@ -48,7 +47,6 @@ struct netlink_sock { struct rhash_head node; struct rcu_head rcu; - struct work_struct work; }; static inline struct netlink_sock *nlk_sk(struct sock *sk) diff --git a/net/netlink/diag.c b/net/netlink/diag.c index 9c4f231be275..7b15aa5f7bc2 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c @@ -241,6 +241,7 @@ static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) } static const struct sock_diag_handler netlink_diag_handler = { + .owner = THIS_MODULE, .family = AF_NETLINK, .dump = netlink_diag_handler_dump, }; diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c index bd2b17b219ae..0b270893ee14 100644 --- a/net/netrom/nr_route.c +++ b/net/netrom/nr_route.c @@ -754,6 +754,12 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25) int ret; struct sk_buff *skbn; + /* + * Reject malformed packets early. 
Check that it contains at least 2 + * addresses and 1 byte more for Time-To-Live + */ + if (skb->len < 2 * sizeof(ax25_address) + 1) + return 0; nr_src = (ax25_address *)(skb->data + 0); nr_dest = (ax25_address *)(skb->data + 7); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 645677f84dba..cd0fd26196b8 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -252,10 +252,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), + optval, optlen); + if (err) break; - } if (opt > LLCP_MAX_RW) { err = -EINVAL; @@ -274,10 +274,10 @@ static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname, break; } - if (copy_from_sockptr(&opt, optval, sizeof(u32))) { - err = -EFAULT; + err = copy_safe_from_sockptr(&opt, sizeof(opt), + optval, optlen); + if (err) break; - } if (opt > LLCP_MAX_MIUX) { err = -EINVAL; diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c index 78c4b6addf15..1057d5347e55 100644 --- a/net/nfc/nci/hci.c +++ b/net/nfc/nci/hci.c @@ -540,6 +540,8 @@ static u8 nci_hci_create_pipe(struct nci_dev *ndev, u8 dest_host, pr_debug("pipe created=%d\n", pipe); + if (pipe >= NCI_HCI_MAX_PIPES) + pipe = NCI_HCI_INVALID_PIPE; return pipe; } diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 21102ffe4470..a87c25e06baf 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -913,7 +913,9 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, { struct vport *vport = ovs_vport_rcu(dp, out_port); - if (likely(vport && netif_carrier_ok(vport->dev))) { + if (likely(vport && + netif_running(vport->dev) && + netif_carrier_ok(vport->dev))) { u16 mru = OVS_CB(skb)->mru; u32 cutlen = OVS_CB(skb)->cutlen; @@ -924,12 +926,6 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, pskb_trim(skb, ovs_mac_header_len(key)); } - /* Need to set the pkt_type to involve the routing layer. The - * packet movement through the OVS datapath doesn't generally - * use routing, but this is needed for tunnel cases. 
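
The NFC LLCP setsockopt conversion above (rxrpc gets the same treatment later in this series) switches to copy_safe_from_sockptr(), which validates optlen before copying so a short user buffer is rejected with -EINVAL instead of being over-read. The handler shape, sketched with an invented option bound:

#include <linux/sockptr.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_OPT_MAX	16	/* invented bound, for illustration only */

static int example_set_u32_option(sockptr_t optval, unsigned int optlen,
				  u32 *dest)
{
	u32 opt;
	int err;

	/* -EINVAL if optlen < sizeof(opt), -EFAULT if the copy faults */
	err = copy_safe_from_sockptr(&opt, sizeof(opt), optval, optlen);
	if (err)
		return err;

	if (opt > EXAMPLE_OPT_MAX)
		return -EINVAL;

	*dest = opt;
	return 0;
}
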
- */ - skb->pkt_type = PACKET_OUTGOING; - if (likely(!mru || (skb->len <= mru + vport->dev->hard_header_len))) { ovs_vport_send(vport, skb, ovs_key_mac_proto(key)); @@ -958,8 +954,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, upcall.cmd = OVS_PACKET_CMD_ACTION; upcall.mru = OVS_CB(skb)->mru; - for (a = nla_data(attr), rem = nla_len(attr); rem > 0; - a = nla_next(a, &rem)) { + nla_for_each_nested(a, attr, rem) { switch (nla_type(a)) { case OVS_USERSPACE_ATTR_USERDATA: upcall.userdata = a; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 3c7b24535409..a2d8b1b4c83e 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -2076,6 +2076,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, { struct ovs_header *ovs_header; struct ovs_vport_stats vport_stats; + struct net *net_vport; int err; ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family, @@ -2092,12 +2093,15 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, nla_put_u32(skb, OVS_VPORT_ATTR_IFINDEX, vport->dev->ifindex)) goto nla_put_failure; - if (!net_eq(net, dev_net(vport->dev))) { - int id = peernet2id_alloc(net, dev_net(vport->dev), gfp); + rcu_read_lock(); + net_vport = dev_net_rcu(vport->dev); + if (!net_eq(net, net_vport)) { + int id = peernet2id_alloc(net, net_vport, GFP_ATOMIC); if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id)) - goto nla_put_failure; + goto nla_put_failure_unlock; } + rcu_read_unlock(); ovs_vport_get_stats(vport, &vport_stats); if (nla_put_64bit(skb, OVS_VPORT_ATTR_STATS, @@ -2115,6 +2119,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb, genlmsg_end(skb, ovs_header); return 0; +nla_put_failure_unlock: + rcu_read_unlock(); nla_put_failure: err = -EMSGSIZE; error: diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index e3c85ceb1f0a..0ed3953dbe52 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2302,14 +2302,10 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb) OVS_FLOW_ATTR_MASK, true, skb); } -#define MAX_ACTIONS_BUFSIZE (32 * 1024) - static struct sw_flow_actions *nla_alloc_flow_actions(int size) { struct sw_flow_actions *sfa; - WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE); - sfa = kmalloc(kmalloc_size_roundup(sizeof(*sfa) + size), GFP_KERNEL); if (!sfa) return ERR_PTR(-ENOMEM); @@ -2465,15 +2461,6 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2); - if (new_acts_size > MAX_ACTIONS_BUFSIZE) { - if ((next_offset + req_size) > MAX_ACTIONS_BUFSIZE) { - OVS_NLERR(log, "Flow action size exceeds max %u", - MAX_ACTIONS_BUFSIZE); - return ERR_PTR(-EMSGSIZE); - } - new_acts_size = MAX_ACTIONS_BUFSIZE; - } - acts = nla_alloc_flow_actions(new_acts_size); if (IS_ERR(acts)) return (void *)acts; @@ -2873,7 +2860,8 @@ static int validate_set(const struct nlattr *a, size_t key_len; /* There can be only one key in a action */ - if (nla_total_size(nla_len(ovs_key)) != nla_len(a)) + if (!nla_ok(ovs_key, nla_len(a)) || + nla_total_size(nla_len(ovs_key)) != nla_len(a)) return -EINVAL; key_len = nla_len(ovs_key); @@ -3492,7 +3480,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, int err; u32 mpls_label_count = 0; - *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE)); + *sfa = nla_alloc_flow_actions(nla_len(attr)); if (IS_ERR(*sfa)) return PTR_ERR(*sfa); diff 
--git a/net/packet/af_packet.c b/net/packet/af_packet.c index c9c813f731c6..f3bd3e126703 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -541,10 +541,8 @@ static void *packet_current_frame(struct packet_sock *po, return packet_lookup_frame(po, rb, rb->head, status); } -static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev) +static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev) { - u8 *skb_orig_data = skb->data; - int skb_orig_len = skb->len; struct vlan_hdr vhdr, *vh; unsigned int header_len; @@ -565,33 +563,21 @@ static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev) else return 0; - skb_push(skb, skb->data - skb_mac_header(skb)); - vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr); - if (skb_orig_data != skb->data) { - skb->data = skb_orig_data; - skb->len = skb_orig_len; - } + vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len, + sizeof(vhdr), &vhdr); if (unlikely(!vh)) return 0; return ntohs(vh->h_vlan_TCI); } -static __be16 vlan_get_protocol_dgram(struct sk_buff *skb) +static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb) { __be16 proto = skb->protocol; - if (unlikely(eth_type_vlan(proto))) { - u8 *skb_orig_data = skb->data; - int skb_orig_len = skb->len; - - skb_push(skb, skb->data - skb_mac_header(skb)); - proto = __vlan_get_protocol(skb, proto, NULL); - if (skb_orig_data != skb->data) { - skb->data = skb_orig_data; - skb->len = skb_orig_len; - } - } + if (unlikely(eth_type_vlan(proto))) + proto = __vlan_get_protocol_offset(skb, proto, + skb_mac_offset(skb), NULL); return proto; } @@ -3418,18 +3404,18 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, if (sock->type == SOCK_PACKET) sock->ops = &packet_ops_spkt; + po = pkt_sk(sk); + err = packet_alloc_pending(po); + if (err) + goto out_sk_free; + sock_init_data(sock, sk); - po = pkt_sk(sk); init_completion(&po->skb_completion); sk->sk_family = PF_PACKET; po->num = proto; po->xmit = dev_queue_xmit; - err = packet_alloc_pending(po); - if (err) - goto out2; - packet_cached_dev_reset(po); sk->sk_destruct = packet_sock_destruct; @@ -3462,7 +3448,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, sock_prot_inuse_add(net, &packet_proto, 1); return 0; -out2: +out_sk_free: sk_free(sk); out: return err; diff --git a/net/packet/diag.c b/net/packet/diag.c index a68a84574c73..057ee37bd076 100644 --- a/net/packet/diag.c +++ b/net/packet/diag.c @@ -245,6 +245,7 @@ static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) } static const struct sock_diag_handler packet_diag_handler = { + .owner = THIS_MODULE, .family = AF_PACKET, .dump = packet_diag_handler_dump, }; diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index f74baefd855d..2df5bf240b64 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c @@ -30,8 +30,12 @@ static int rfkill_gpio_set_power(void *data, bool blocked) { struct rfkill_gpio_data *rfkill = data; - if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled) - clk_enable(rfkill->clk); + if (!blocked && !IS_ERR(rfkill->clk) && !rfkill->clk_enabled) { + int ret = clk_enable(rfkill->clk); + + if (ret) + return ret; + } gpiod_set_value_cansleep(rfkill->shutdown_gpio, !blocked); gpiod_set_value_cansleep(rfkill->reset_gpio, !blocked); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index 29b74a569e0b..b21c2ce40192 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -397,15 +397,15 @@ static int 
rose_setsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; struct rose_sock *rose = rose_sk(sk); - int opt; + unsigned int opt; if (level != SOL_ROSE) return -ENOPROTOOPT; - if (optlen < sizeof(int)) + if (optlen < sizeof(unsigned int)) return -EINVAL; - if (copy_from_sockptr(&opt, optval, sizeof(int))) + if (copy_from_sockptr(&opt, optval, sizeof(unsigned int))) return -EFAULT; switch (optname) { @@ -414,31 +414,31 @@ static int rose_setsockopt(struct socket *sock, int level, int optname, return 0; case ROSE_T1: - if (opt < 1) + if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; rose->t1 = opt * HZ; return 0; case ROSE_T2: - if (opt < 1) + if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; rose->t2 = opt * HZ; return 0; case ROSE_T3: - if (opt < 1) + if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; rose->t3 = opt * HZ; return 0; case ROSE_HOLDBACK: - if (opt < 1) + if (opt < 1 || opt > UINT_MAX / HZ) return -EINVAL; rose->hb = opt * HZ; return 0; case ROSE_IDLE: - if (opt < 0) + if (opt > UINT_MAX / (60 * HZ)) return -EINVAL; rose->idle = opt * 60 * HZ; return 0; @@ -701,11 +701,9 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct net_device *dev; ax25_address *source; ax25_uid_assoc *user; + int err = -EINVAL; int n; - if (!sock_flag(sk, SOCK_ZAPPED)) - return -EINVAL; - if (addr_len != sizeof(struct sockaddr_rose) && addr_len != sizeof(struct full_sockaddr_rose)) return -EINVAL; @@ -718,8 +716,15 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) return -EINVAL; - if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) - return -EADDRNOTAVAIL; + lock_sock(sk); + + if (!sock_flag(sk, SOCK_ZAPPED)) + goto out_release; + + err = -EADDRNOTAVAIL; + dev = rose_dev_get(&addr->srose_addr); + if (!dev) + goto out_release; source = &addr->srose_call; @@ -730,7 +735,8 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) } else { if (ax25_uid_policy && !capable(CAP_NET_BIND_SERVICE)) { dev_put(dev); - return -EACCES; + err = -EACCES; + goto out_release; } rose->source_call = *source; } @@ -753,8 +759,10 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) rose_insert_socket(sk); sock_reset_flag(sk, SOCK_ZAPPED); - - return 0; + err = 0; +out_release: + release_sock(sk); + return err; } static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) diff --git a/net/rose/rose_timer.c b/net/rose/rose_timer.c index f06ddbed3fed..1525773e94aa 100644 --- a/net/rose/rose_timer.c +++ b/net/rose/rose_timer.c @@ -122,6 +122,10 @@ static void rose_heartbeat_expiry(struct timer_list *t) struct rose_sock *rose = rose_sk(sk); bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { + sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ/20); + goto out; + } switch (rose->state) { case ROSE_STATE_0: /* Magic here: If we listen() and a new link dies before it @@ -152,6 +156,7 @@ static void rose_heartbeat_expiry(struct timer_list *t) } rose_start_heartbeat(sk); +out: bh_unlock_sock(sk); sock_put(sk); } @@ -162,6 +167,10 @@ static void rose_timer_expiry(struct timer_list *t) struct sock *sk = &rose->sock; bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { + sk_reset_timer(sk, &rose->timer, jiffies + HZ/20); + goto out; + } switch (rose->state) { case ROSE_STATE_1: /* T1 */ case ROSE_STATE_4: /* T2 */ @@ -182,6 +191,7 @@ static void rose_timer_expiry(struct timer_list *t) } break; } 
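
The rose timer fixes in this area all add the same guard: a BH-context timer that finds the socket owned by a process context must not touch protocol state, so it re-arms itself briefly and tries again. A hypothetical expiry handler showing just that shape (the helpers used are the stock ones from <net/sock.h>):

#include <net/sock.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static void example_timer_expiry(struct timer_list *t)
{
	struct sock *sk = from_timer(sk, t, sk_timer);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		/* a process context owns the socket: retry in ~50ms */
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
		goto out;
	}

	/* ... normal expiry work, protocol state is stable here ... */

out:
	bh_unlock_sock(sk);
	sock_put(sk);	/* drop the reference taken when the timer was armed */
}
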
+out: bh_unlock_sock(sk); sock_put(sk); } @@ -192,6 +202,10 @@ static void rose_idletimer_expiry(struct timer_list *t) struct sock *sk = &rose->sock; bh_lock_sock(sk); + if (sock_owned_by_user(sk)) { + sk_reset_timer(sk, &rose->idletimer, jiffies + HZ/20); + goto out; + } rose_clear_queues(sk); rose_write_internal(sk, ROSE_CLEAR_REQUEST); @@ -207,6 +221,7 @@ static void rose_idletimer_expiry(struct timer_list *t) sk->sk_state_change(sk); sock_set_flag(sk, SOCK_DEAD); } +out: bh_unlock_sock(sk); sock_put(sk); } diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index ceba28e9dce6..9b3efe6d580f 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -641,9 +641,10 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname, ret = -EISCONN; if (rx->sk.sk_state != RXRPC_UNBOUND) goto error; - ret = copy_from_sockptr(&min_sec_level, optval, - sizeof(unsigned int)); - if (ret < 0) + ret = copy_safe_from_sockptr(&min_sec_level, + sizeof(min_sec_level), + optval, optlen); + if (ret) goto error; ret = -EINVAL; if (min_sec_level > RXRPC_SECURITY_MAX) diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 05bd1e9bca36..b75e22756c4b 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -61,7 +61,7 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie, { struct tc_cookie *old; - old = xchg((__force struct tc_cookie **)old_cookie, new_cookie); + old = unrcu_pointer(xchg(old_cookie, RCU_INITIALIZER(new_cookie))); if (old) call_rcu(&old->rcu, tcf_free_cookie_rcu); } diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index 36395e5db3b4..bbc34987bd09 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -255,31 +255,31 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit); m_eaction = READ_ONCE(m->tcfm_eaction); + is_redirect = tcf_mirred_is_act_redirect(m_eaction); retval = READ_ONCE(m->tcf_action); dev = rcu_dereference_bh(m->tcfm_dev); if (unlikely(!dev)) { pr_notice_once("tc mirred: target device is gone\n"); - goto out; + goto err_cant_do; } if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) { net_notice_ratelimited("tc mirred to Houston: device %s is down\n", dev->name); - goto out; + goto err_cant_do; } /* we could easily avoid the clone only if called by ingress and clsact; * since we can't easily detect the clsact caller, skip clone only for * ingress - that covers the TC S/W datapath. 
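The rxrpc hunk above switches RXRPC_MIN_SECURITY_LEVEL to copy_safe_from_sockptr(), which refuses to read past the optlen the caller supplied. A userspace analogue of that bounds check (copy_option() and its caller are made up for this sketch, not the kernel helper):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Copy dst_len bytes only if the caller actually provided that many. */
static int copy_option(void *dst, size_t dst_len, const void *src, size_t src_len)
{
        if (src_len < dst_len)
                return -EINVAL;
        memcpy(dst, src, dst_len);
        return 0;
}

int main(void)
{
        unsigned int min_sec_level = 0, user_word = 2;

        printf("full-size optval: %d\n",
               copy_option(&min_sec_level, sizeof(min_sec_level), &user_word, sizeof(user_word)));
        printf("short optval:     %d\n",
               copy_option(&min_sec_level, sizeof(min_sec_level), &user_word, 2));
        return 0;
}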
*/ - is_redirect = tcf_mirred_is_act_redirect(m_eaction); at_ingress = skb_at_tc_ingress(skb); use_reinsert = at_ingress && is_redirect && tcf_mirred_can_reinsert(retval); if (!use_reinsert) { skb2 = skb_clone(skb, GFP_ATOMIC); if (!skb2) - goto out; + goto err_cant_do; } want_ingress = tcf_mirred_act_wants_ingress(m_eaction); @@ -321,12 +321,16 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a, } err = tcf_mirred_forward(want_ingress, skb2); - if (err) { -out: + if (err) tcf_action_inc_overlimit_qstats(&m->common); - if (tcf_mirred_is_act_redirect(m_eaction)) - retval = TC_ACT_SHOT; - } + __this_cpu_dec(mirred_nest_level); + + return retval; + +err_cant_do: + if (is_redirect) + retval = TC_ACT_SHOT; + tcf_action_inc_overlimit_qstats(&m->common); __this_cpu_dec(mirred_nest_level); return retval; diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 2691a3d8e451..55a9dff1cc1e 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -66,7 +66,7 @@ geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = { [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, [TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, - .len = 128 }, + .len = 127 }, }; static const struct nla_policy diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 445ab1b0537d..89da596be1b8 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1824,6 +1824,7 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb, struct tcmsg *tcm; struct nlmsghdr *nlh; unsigned char *b = skb_tail_pointer(skb); + int ret = -EMSGSIZE; nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags); if (!nlh) @@ -1868,11 +1869,45 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb, return skb->len; +cls_op_not_supp: + ret = -EOPNOTSUPP; out_nlmsg_trim: nla_put_failure: -cls_op_not_supp: nlmsg_trim(skb, b); - return -1; + return ret; +} + +static struct sk_buff *tfilter_notify_prep(struct net *net, + struct sk_buff *oskb, + struct nlmsghdr *n, + struct tcf_proto *tp, + struct tcf_block *block, + struct Qdisc *q, u32 parent, + void *fh, int event, + u32 portid, bool rtnl_held, + struct netlink_ext_ack *extack) +{ + unsigned int size = oskb ? max(NLMSG_GOODSIZE, oskb->len) : NLMSG_GOODSIZE; + struct sk_buff *skb; + int ret; + +retry: + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) + return ERR_PTR(-ENOBUFS); + + ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, + n->nlmsg_seq, n->nlmsg_flags, event, false, + rtnl_held, extack); + if (ret <= 0) { + kfree_skb(skb); + if (ret == -EMSGSIZE) { + size += NLMSG_GOODSIZE; + goto retry; + } + return ERR_PTR(-EINVAL); + } + return skb; } static int tfilter_notify(struct net *net, struct sk_buff *oskb, @@ -1885,16 +1920,13 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb, u32 portid = oskb ? 
NETLINK_CB(oskb).portid : 0; int err = 0; - skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb) - return -ENOBUFS; + if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) + return 0; - if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, - n->nlmsg_seq, n->nlmsg_flags, event, - false, rtnl_held, extack) <= 0) { - kfree_skb(skb); - return -EINVAL; - } + skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event, + portid, rtnl_held, extack); + if (IS_ERR(skb)) + return PTR_ERR(skb); if (unicast) err = rtnl_unicast(skb, net, portid); @@ -1914,16 +1946,14 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb, u32 portid = oskb ? NETLINK_CB(oskb).portid : 0; int err; - skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); - if (!skb) - return -ENOBUFS; + if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC)) + return tp->ops->delete(tp, fh, last, rtnl_held, extack); - if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid, - n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER, - false, rtnl_held, extack) <= 0) { + skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, + RTM_DELTFILTER, portid, rtnl_held, extack); + if (IS_ERR(skb)) { NL_SET_ERR_MSG(extack, "Failed to build del event notification"); - kfree_skb(skb); - return -EINVAL; + return PTR_ERR(skb); } err = tp->ops->delete(tp, fh, last, rtnl_held, extack); @@ -2731,6 +2761,9 @@ static int tc_chain_notify(struct tcf_chain *chain, struct sk_buff *oskb, struct sk_buff *skb; int err = 0; + if (!unicast && !rtnl_notify_needed(net, flags, RTNLGRP_TC)) + return 0; + skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; @@ -2760,6 +2793,9 @@ static int tc_chain_notify_delete(const struct tcf_proto_ops *tmplt_ops, struct net *net = block->net; struct sk_buff *skb; + if (!rtnl_notify_needed(net, flags, RTNLGRP_TC)) + return 0; + skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); if (!skb) return -ENOBUFS; diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 014cd3de7b5d..7657d86ad142 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -354,7 +354,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = { [TCA_FLOW_KEYS] = { .type = NLA_U32 }, [TCA_FLOW_MODE] = { .type = NLA_U32 }, [TCA_FLOW_BASECLASS] = { .type = NLA_U32 }, - [TCA_FLOW_RSHIFT] = { .type = NLA_U32 }, + [TCA_FLOW_RSHIFT] = NLA_POLICY_MAX(NLA_U32, + 31 /* BITS_PER_U32 - 1 */), [TCA_FLOW_ADDEND] = { .type = NLA_U32 }, [TCA_FLOW_MASK] = { .type = NLA_U32 }, [TCA_FLOW_XOR] = { .type = NLA_U32 }, diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 10e6ec0f9498..a40a9e84c75f 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -732,7 +732,7 @@ geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = { [TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY, - .len = 128 }, + .len = 127 }, }; static const struct nla_policy @@ -1279,7 +1279,6 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, int err; md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len]; - memset(md, 0xff, sizeof(*md)); md->version = 1; if (!depth) @@ -1308,9 +1307,9 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index"); return -EINVAL; } + memset(&md->u.index, 0xff, sizeof(md->u.index)); if 
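The cls_api hunks above replace the fixed NLMSG_GOODSIZE allocation with tfilter_notify_prep(), which regrows the buffer and retries whenever the fill routine reports -EMSGSIZE. A userspace model of that grow-and-retry loop (fill() and the 4 KiB chunk size are stand-ins, not kernel code):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK 4096                      /* stand-in for NLMSG_GOODSIZE */

/* Pretend fill routine: fails with -EMSGSIZE until the buffer is big enough. */
static int fill(char *buf, size_t size, size_t need)
{
        if (size < need)
                return -EMSGSIZE;
        memset(buf, 0, need);
        return (int)need;
}

static char *prep(size_t need, size_t *out_size)
{
        size_t size = CHUNK;
        char *buf;
        int ret;

retry:
        buf = malloc(size);
        if (!buf)
                return NULL;
        ret = fill(buf, size, need);
        if (ret <= 0) {
                free(buf);
                if (ret == -EMSGSIZE) {
                        size += CHUNK;
                        goto retry;
                }
                return NULL;
        }
        *out_size = size;
        return buf;
}

int main(void)
{
        size_t size = 0;
        char *buf = prep(10000, &size);

        printf("message of 10000 bytes fitted into a %zu byte buffer\n", size);
        free(buf);
        return 0;
}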
(tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]; - memset(&md->u, 0x00, sizeof(md->u)); md->u.index = nla_get_be32(nla); } } else if (md->version == 2) { @@ -1319,10 +1318,12 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid"); return -EINVAL; } + md->u.md2.dir = 1; if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) { nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]; md->u.md2.dir = nla_get_u8(nla); } + set_hwid(&md->u.md2, 0xff); if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) { nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]; set_hwid(&md->u.md2, nla_get_u8(nla)); diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 04448bfb4d3d..e87d79d043d5 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -70,7 +70,7 @@ struct tc_u_hnode { struct tc_u_hnode __rcu *next; u32 handle; u32 prio; - int refcnt; + refcount_t refcnt; unsigned int divisor; struct idr handle_idr; bool is_root; @@ -85,12 +85,22 @@ struct tc_u_hnode { struct tc_u_common { struct tc_u_hnode __rcu *hlist; void *ptr; - int refcnt; + refcount_t refcnt; struct idr handle_idr; struct hlist_node hnode; long knodes; }; +static u32 handle2id(u32 h) +{ + return ((h & 0x80000000) ? ((h >> 20) & 0x7FF) : h); +} + +static u32 id2handle(u32 id) +{ + return (id | 0x800U) << 20; +} + static inline unsigned int u32_hash_fold(__be32 key, const struct tc_u32_sel *sel, u8 fshift) @@ -308,7 +318,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr) int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL); if (id < 0) return 0; - return (id | 0x800U) << 20; + return id2handle(id); } static struct hlist_head *tc_u_common_hash; @@ -357,8 +367,8 @@ static int u32_init(struct tcf_proto *tp) if (root_ht == NULL) return -ENOBUFS; - root_ht->refcnt++; - root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000; + refcount_set(&root_ht->refcnt, 1); + root_ht->handle = tp_c ? 
gen_new_htid(tp_c, root_ht) : id2handle(0); root_ht->prio = tp->prio; root_ht->is_root = true; idr_init(&root_ht->handle_idr); @@ -369,18 +379,20 @@ static int u32_init(struct tcf_proto *tp) kfree(root_ht); return -ENOBUFS; } + refcount_set(&tp_c->refcnt, 1); tp_c->ptr = key; INIT_HLIST_NODE(&tp_c->hnode); idr_init(&tp_c->handle_idr); hlist_add_head(&tp_c->hnode, tc_u_hash(key)); + } else { + refcount_inc(&tp_c->refcnt); } - tp_c->refcnt++; RCU_INIT_POINTER(root_ht->next, tp_c->hlist); rcu_assign_pointer(tp_c->hlist, root_ht); - root_ht->refcnt++; + /* root_ht must be destroyed when tcf_proto is destroyed */ rcu_assign_pointer(tp->root, root_ht); tp->data = tp_c; return 0; @@ -391,7 +403,7 @@ static void __u32_destroy_key(struct tc_u_knode *n) struct tc_u_hnode *ht = rtnl_dereference(n->ht_down); tcf_exts_destroy(&n->exts); - if (ht && --ht->refcnt == 0) + if (ht && refcount_dec_and_test(&ht->refcnt)) kfree(ht); kfree(n); } @@ -599,8 +611,6 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, struct tc_u_hnode __rcu **hn; struct tc_u_hnode *phn; - WARN_ON(--ht->refcnt); - u32_clear_hnode(tp, ht, extack); hn = &tp_c->hlist; @@ -610,7 +620,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht, if (phn == ht) { u32_clear_hw_hnode(tp, ht, extack); idr_destroy(&ht->handle_idr); - idr_remove(&tp_c->handle_idr, ht->handle); + idr_remove(&tp_c->handle_idr, handle2id(ht->handle)); RCU_INIT_POINTER(*hn, ht->next); kfree_rcu(ht, rcu); return 0; @@ -628,10 +638,10 @@ static void u32_destroy(struct tcf_proto *tp, bool rtnl_held, WARN_ON(root_ht == NULL); - if (root_ht && --root_ht->refcnt == 1) + if (root_ht && refcount_dec_and_test(&root_ht->refcnt)) u32_destroy_hnode(tp, root_ht, extack); - if (--tp_c->refcnt == 0) { + if (refcount_dec_and_test(&tp_c->refcnt)) { struct tc_u_hnode *ht; hlist_del(&tp_c->hnode); @@ -643,7 +653,7 @@ static void u32_destroy(struct tcf_proto *tp, bool rtnl_held, /* u32_destroy_key() will later free ht for us, if it's * still referenced by some knode */ - if (--ht->refcnt == 0) + if (refcount_dec_and_test(&ht->refcnt)) kfree_rcu(ht, rcu); } @@ -672,7 +682,7 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last, return -EINVAL; } - if (ht->refcnt == 1) { + if (refcount_dec_if_one(&ht->refcnt)) { u32_destroy_hnode(tp, ht, extack); } else { NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter"); @@ -680,7 +690,7 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last, } out: - *last = tp_c->refcnt == 1 && tp_c->knodes == 0; + *last = refcount_read(&tp_c->refcnt) == 1 && tp_c->knodes == 0; return ret; } @@ -764,14 +774,14 @@ static int u32_set_parms(struct net *net, struct tcf_proto *tp, NL_SET_ERR_MSG_MOD(extack, "Not linking to root node"); return -EINVAL; } - ht_down->refcnt++; + refcount_inc(&ht_down->refcnt); } ht_old = rtnl_dereference(n->ht_down); rcu_assign_pointer(n->ht_down, ht_down); if (ht_old) - ht_old->refcnt--; + refcount_dec(&ht_old->refcnt); } if (ifindex >= 0) @@ -850,7 +860,7 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp, /* bump reference count as long as we hold pointer to structure */ if (ht) - ht->refcnt++; + refcount_inc(&ht->refcnt); return new; } @@ -930,7 +940,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, ht_old = rtnl_dereference(n->ht_down); if (ht_old) - ht_old->refcnt++; + refcount_inc(&ht_old->refcnt); } __u32_destroy_key(new); return err; @@ -978,7 +988,7 @@ static int u32_change(struct net *net, struct 
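The cls_u32 hunks above hide the root-hash handle encoding behind id2handle()/handle2id(), so the IDR is always keyed by the id rather than the shifted handle. The helpers are small enough to check in isolation; this standalone program reuses their exact arithmetic:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the helpers added above. */
static uint32_t id2handle(uint32_t id)
{
        return (id | 0x800U) << 20;
}

static uint32_t handle2id(uint32_t h)
{
        return (h & 0x80000000) ? ((h >> 20) & 0x7FF) : h;
}

int main(void)
{
        uint32_t id;

        for (id = 1; id < 0x800; id++)
                assert(handle2id(id2handle(id)) == id);
        printf("id 0x001 -> handle 0x%08x\n", id2handle(1));
        printf("id 0x7ff -> handle 0x%08x\n", id2handle(0x7ff));
        return 0;
}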
sk_buff *in_skb, return err; } } - ht->refcnt = 1; + refcount_set(&ht->refcnt, 1); ht->divisor = divisor; ht->handle = handle; ht->prio = tp->prio; @@ -987,7 +997,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, err = u32_replace_hw_hnode(tp, ht, userflags, extack); if (err) { - idr_remove(&tp_c->handle_idr, handle); + idr_remove(&tp_c->handle_idr, handle2id(handle)); kfree(ht); return err; } diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index fe053e717260..c395e7a98232 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1638,6 +1638,10 @@ replay: q = qdisc_lookup(dev, tcm->tcm_handle); if (!q) goto create_n_graft; + if (q->parent != tcm->tcm_parent) { + NL_SET_ERR_MSG(extack, "Cannot move an existing qdisc to a different parent"); + return -EINVAL; + } if (n->nlmsg_flags & NLM_F_EXCL) { NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override"); return -EEXIST; @@ -2196,6 +2200,12 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, return -EOPNOTSUPP; } + /* Prevent creation of traffic classes with classid TC_H_ROOT */ + if (clid == TC_H_ROOT) { + NL_SET_ERR_MSG(extack, "Cannot create traffic class with classid TC_H_ROOT"); + return -EINVAL; + } + new_cl = cl; err = -EOPNOTSUPP; if (cops->change) diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c index 73e8caeffd47..12dd4d41605c 100644 --- a/net/sched/sch_cake.c +++ b/net/sched/sch_cake.c @@ -643,6 +643,63 @@ static bool cake_ddst(int flow_mode) return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST; } +static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q, + struct cake_flow *flow, + int flow_mode) +{ + if (likely(cake_dsrc(flow_mode) && + q->hosts[flow->srchost].srchost_bulk_flow_count)) + q->hosts[flow->srchost].srchost_bulk_flow_count--; +} + +static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q, + struct cake_flow *flow, + int flow_mode) +{ + if (likely(cake_dsrc(flow_mode) && + q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES)) + q->hosts[flow->srchost].srchost_bulk_flow_count++; +} + +static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q, + struct cake_flow *flow, + int flow_mode) +{ + if (likely(cake_ddst(flow_mode) && + q->hosts[flow->dsthost].dsthost_bulk_flow_count)) + q->hosts[flow->dsthost].dsthost_bulk_flow_count--; +} + +static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q, + struct cake_flow *flow, + int flow_mode) +{ + if (likely(cake_ddst(flow_mode) && + q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES)) + q->hosts[flow->dsthost].dsthost_bulk_flow_count++; +} + +static u16 cake_get_flow_quantum(struct cake_tin_data *q, + struct cake_flow *flow, + int flow_mode) +{ + u16 host_load = 1; + + if (cake_dsrc(flow_mode)) + host_load = max(host_load, + q->hosts[flow->srchost].srchost_bulk_flow_count); + + if (cake_ddst(flow_mode)) + host_load = max(host_load, + q->hosts[flow->dsthost].dsthost_bulk_flow_count); + + /* The get_random_u16() is a way to apply dithering to avoid + * accumulating roundoff errors + */ + return (q->flow_quantum * quantum_div[host_load] + + get_random_u16()) >> 16; +} + static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, int flow_mode, u16 flow_override, u16 host_override) { @@ -789,10 +846,8 @@ skip_hash: allocate_dst = cake_ddst(flow_mode); if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { - if (allocate_src) - q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; - if (allocate_dst) - 
q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; + cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode); + cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode); } found: /* reserve queue for future packets in same flow */ @@ -817,9 +872,10 @@ found: q->hosts[outer_hash + k].srchost_tag = srchost_hash; found_src: srchost_idx = outer_hash + k; - if (q->flows[reduced_hash].set == CAKE_SET_BULK) - q->hosts[srchost_idx].srchost_bulk_flow_count++; q->flows[reduced_hash].srchost = srchost_idx; + + if (q->flows[reduced_hash].set == CAKE_SET_BULK) + cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode); } if (allocate_dst) { @@ -840,9 +896,10 @@ found_src: q->hosts[outer_hash + k].dsthost_tag = dsthost_hash; found_dst: dsthost_idx = outer_hash + k; - if (q->flows[reduced_hash].set == CAKE_SET_BULK) - q->hosts[dsthost_idx].dsthost_bulk_flow_count++; q->flows[reduced_hash].dsthost = dsthost_idx; + + if (q->flows[reduced_hash].set == CAKE_SET_BULK) + cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode); } } @@ -1541,7 +1598,6 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) b->backlogs[idx] -= len; b->tin_backlog -= len; sch->qstats.backlog -= len; - qdisc_tree_reduce_backlog(sch, 1, len); flow->dropped++; b->tin_dropped++; @@ -1552,6 +1608,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free) __qdisc_drop(skb, to_free); sch->q.qlen--; + qdisc_tree_reduce_backlog(sch, 1, len); cake_heapify(q, 0); @@ -1855,10 +1912,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* flowchain */ if (!flow->set || flow->set == CAKE_SET_DECAYING) { - struct cake_host *srchost = &b->hosts[flow->srchost]; - struct cake_host *dsthost = &b->hosts[flow->dsthost]; - u16 host_load = 1; - if (!flow->set) { list_add_tail(&flow->flowchain, &b->new_flows); } else { @@ -1868,18 +1921,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, flow->set = CAKE_SET_SPARSE; b->sparse_flow_count++; - if (cake_dsrc(q->flow_mode)) - host_load = max(host_load, srchost->srchost_bulk_flow_count); - - if (cake_ddst(q->flow_mode)) - host_load = max(host_load, dsthost->dsthost_bulk_flow_count); - - flow->deficit = (b->flow_quantum * - quantum_div[host_load]) >> 16; + flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode); } else if (flow->set == CAKE_SET_SPARSE_WAIT) { - struct cake_host *srchost = &b->hosts[flow->srchost]; - struct cake_host *dsthost = &b->hosts[flow->dsthost]; - /* this flow was empty, accounted as a sparse flow, but actually * in the bulk rotation. 
*/ @@ -1887,12 +1930,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, b->sparse_flow_count--; b->bulk_flow_count++; - if (cake_dsrc(q->flow_mode)) - srchost->srchost_bulk_flow_count++; - - if (cake_ddst(q->flow_mode)) - dsthost->dsthost_bulk_flow_count++; - + cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode); + cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode); } if (q->buffer_used > q->buffer_max_used) @@ -1949,13 +1988,11 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch) { struct cake_sched_data *q = qdisc_priv(sch); struct cake_tin_data *b = &q->tins[q->cur_tin]; - struct cake_host *srchost, *dsthost; ktime_t now = ktime_get(); struct cake_flow *flow; struct list_head *head; bool first_flow = true; struct sk_buff *skb; - u16 host_load; u64 delay; u32 len; @@ -2055,11 +2092,6 @@ retry: q->cur_flow = flow - b->flows; first_flow = false; - /* triple isolation (modified DRR++) */ - srchost = &b->hosts[flow->srchost]; - dsthost = &b->hosts[flow->dsthost]; - host_load = 1; - /* flow isolation (DRR++) */ if (flow->deficit <= 0) { /* Keep all flows with deficits out of the sparse and decaying @@ -2071,11 +2103,8 @@ retry: b->sparse_flow_count--; b->bulk_flow_count++; - if (cake_dsrc(q->flow_mode)) - srchost->srchost_bulk_flow_count++; - - if (cake_ddst(q->flow_mode)) - dsthost->dsthost_bulk_flow_count++; + cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode); + cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode); flow->set = CAKE_SET_BULK; } else { @@ -2087,19 +2116,7 @@ retry: } } - if (cake_dsrc(q->flow_mode)) - host_load = max(host_load, srchost->srchost_bulk_flow_count); - - if (cake_ddst(q->flow_mode)) - host_load = max(host_load, dsthost->dsthost_bulk_flow_count); - - WARN_ON(host_load > CAKE_QUEUES); - - /* The get_random_u16() is a way to apply dithering to avoid - * accumulating roundoff errors - */ - flow->deficit += (b->flow_quantum * quantum_div[host_load] + - get_random_u16()) >> 16; + flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode); list_move_tail(&flow->flowchain, &b->old_flows); goto retry; @@ -2123,11 +2140,8 @@ retry: if (flow->set == CAKE_SET_BULK) { b->bulk_flow_count--; - if (cake_dsrc(q->flow_mode)) - srchost->srchost_bulk_flow_count--; - - if (cake_ddst(q->flow_mode)) - dsthost->dsthost_bulk_flow_count--; + cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode); + cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode); b->decaying_flow_count++; } else if (flow->set == CAKE_SET_SPARSE || @@ -2145,12 +2159,8 @@ retry: else if (flow->set == CAKE_SET_BULK) { b->bulk_flow_count--; - if (cake_dsrc(q->flow_mode)) - srchost->srchost_bulk_flow_count--; - - if (cake_ddst(q->flow_mode)) - dsthost->dsthost_bulk_flow_count--; - + cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode); + cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode); } else b->decaying_flow_count--; diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c index cac870eb7897..0567a15d0f85 100644 --- a/net/sched/sch_cbs.c +++ b/net/sched/sch_cbs.c @@ -310,7 +310,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) { struct ethtool_link_ksettings ecmd; int speed = SPEED_10; - int port_rate; + s64 port_rate; int err; err = __ethtool_get_link_ksettings(dev, &ecmd); diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c index 3ac3e5c80b6f..e38cf3428701 100644 --- a/net/sched/sch_choke.c +++ b/net/sched/sch_choke.c @@ -123,10 +123,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx, if (idx == 
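The sch_cake hunks above fold the per-host scaling into cake_get_flow_quantum() while keeping the dithering: the 16.16 reciprocal truncates, so adding a random 16-bit value before the shift makes the rounding error average out instead of accumulating. A userspace sketch of the arithmetic (quantum_div() here is a local stand-in for the kernel's precomputed 65535/load table):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint16_t quantum_div(unsigned int host_load)
{
        return host_load ? 65535 / host_load : 65535;
}

static uint16_t flow_quantum(uint16_t base_quantum, unsigned int host_load)
{
        /* random dither in [0, 65535] added before the 16-bit shift */
        return (base_quantum * quantum_div(host_load) + (rand() & 0xffff)) >> 16;
}

int main(void)
{
        unsigned long sum = 0;
        int i;

        for (i = 0; i < 100000; i++)
                sum += flow_quantum(1514, 3);
        /* mean stays near 1514 / 3 = 504.7 despite the truncating shift */
        printf("average quantum at host load 3: %lu\n", sum / 100000);
        return 0;
}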
q->tail) choke_zap_tail_holes(q); + --sch->q.qlen; qdisc_qstats_backlog_dec(sch, skb); qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); qdisc_drop(skb, sch, to_free); - --sch->q.qlen; } struct choke_skb_cb { diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c index d7a4874543de..63c02040b426 100644 --- a/net/sched/sch_codel.c +++ b/net/sched/sch_codel.c @@ -95,10 +95,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch) &q->stats, qdisc_pkt_len, codel_get_enqueue_time, drop_func, dequeue_func); - /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0, - * or HTB crashes. Defer it for next round. - */ - if (q->stats.drop_count && sch->q.qlen) { + if (q->stats.drop_count) { qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len); q->stats.drop_count = 0; q->stats.drop_len = 0; @@ -171,7 +168,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt, qlen = sch->q.qlen; while (sch->q.qlen > sch->limit) { - struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); + struct sk_buff *skb = qdisc_dequeue_internal(sch, true); dropped += qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb); diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index e35a4e90f4e6..3aee041c7bdb 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -36,6 +36,11 @@ struct drr_sched { struct Qdisc_class_hash clhash; }; +static bool cl_is_active(struct drr_class *cl) +{ + return !list_empty(&cl->alist); +} + static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid) { struct drr_sched *q = qdisc_priv(sch); @@ -106,6 +111,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return -ENOBUFS; gnet_stats_basic_sync_init(&cl->bstats); + INIT_LIST_HEAD(&cl->alist); cl->common.classid = classid; cl->quantum = quantum; cl->qdisc = qdisc_create_dflt(sch->dev_queue, @@ -228,7 +234,7 @@ static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg) { struct drr_class *cl = (struct drr_class *)arg; - list_del(&cl->alist); + list_del_init(&cl->alist); } static int drr_dump_class(struct Qdisc *sch, unsigned long arg, @@ -335,7 +341,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct drr_sched *q = qdisc_priv(sch); struct drr_class *cl; int err = 0; - bool first; cl = drr_classify(skb, sch, &err); if (cl == NULL) { @@ -345,7 +350,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - first = !cl->qdisc->q.qlen; err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { @@ -355,7 +359,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - if (first) { + if (!cl_is_active(cl)) { list_add_tail(&cl->alist, &q->active); cl->deficit = cl->quantum; } @@ -389,7 +393,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch) if (unlikely(skb == NULL)) goto out; if (cl->qdisc->q.qlen == 0) - list_del(&cl->alist); + list_del_init(&cl->alist); bstats_update(&cl->bstats, skb); qdisc_bstats_update(sch, skb); @@ -430,7 +434,7 @@ static void drr_reset_qdisc(struct Qdisc *sch) for (i = 0; i < q->clhash.hashsize; i++) { hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { if (cl->qdisc->q.qlen) - list_del(&cl->alist); + list_del_init(&cl->alist); qdisc_reset(cl->qdisc); } } diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c index b10efeaf0629..9da86db4d2c2 100644 --- a/net/sched/sch_ets.c +++ b/net/sched/sch_ets.c @@ -74,6 +74,11 @@ static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = { 
[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 }, }; +static bool cl_is_active(struct ets_class *cl) +{ + return !list_empty(&cl->alist); +} + static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr, unsigned int *quantum, struct netlink_ext_ack *extack) @@ -91,6 +96,8 @@ ets_class_from_arg(struct Qdisc *sch, unsigned long arg) { struct ets_sched *q = qdisc_priv(sch); + if (arg == 0 || arg > q->nbands) + return NULL; return &q->classes[arg - 1]; } @@ -291,7 +298,7 @@ static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg) * to remove them. */ if (!ets_class_is_strict(q, cl) && sch->q.qlen) - list_del(&cl->alist); + list_del_init(&cl->alist); } static int ets_class_dump(struct Qdisc *sch, unsigned long arg, @@ -414,7 +421,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct ets_sched *q = qdisc_priv(sch); struct ets_class *cl; int err = 0; - bool first; cl = ets_classify(skb, sch, &err); if (!cl) { @@ -424,7 +430,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - first = !cl->qdisc->q.qlen; err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { if (net_xmit_drop_count(err)) { @@ -434,7 +439,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, return err; } - if (first && !ets_class_is_strict(q, cl)) { + if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) { list_add_tail(&cl->alist, &q->active); cl->deficit = cl->quantum; } @@ -486,7 +491,7 @@ static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch) if (unlikely(!skb)) goto out; if (cl->qdisc->q.qlen == 0) - list_del(&cl->alist); + list_del_init(&cl->alist); return ets_qdisc_dequeue_skb(sch, skb); } @@ -655,7 +660,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt, } for (i = q->nbands; i < oldbands; i++) { if (i >= q->nstrict && q->classes[i].qdisc->q.qlen) - list_del(&q->classes[i].alist); + list_del_init(&q->classes[i].alist); qdisc_tree_flush_backlog(q->classes[i].qdisc); } q->nstrict = nstrict; @@ -711,7 +716,7 @@ static void ets_qdisc_reset(struct Qdisc *sch) for (band = q->nstrict; band < q->nbands; band++) { if (q->classes[band].qdisc->q.qlen) - list_del(&q->classes[band].alist); + list_del_init(&q->classes[band].alist); } for (band = 0; band < q->nbands; band++) qdisc_reset(q->classes[band].qdisc); diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c index e1040421b797..af5f2ab69b8d 100644 --- a/net/sched/sch_fifo.c +++ b/net/sched/sch_fifo.c @@ -39,6 +39,9 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, { unsigned int prev_backlog; + if (unlikely(READ_ONCE(sch->limit) == 0)) + return qdisc_drop(skb, sch, to_free); + if (likely(sch->q.qlen < sch->limit)) return qdisc_enqueue_tail(skb, sch); diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c index f59a2cb2c803..91f5ef6be0f2 100644 --- a/net/sched/sch_fq.c +++ b/net/sched/sch_fq.c @@ -901,7 +901,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt, sch_tree_lock(sch); } while (sch->q.qlen > sch->limit) { - struct sk_buff *skb = fq_dequeue(sch); + struct sk_buff *skb = qdisc_dequeue_internal(sch, false); if (!skb) break; diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 8c4fee063436..47b5a056165c 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -314,10 +314,8 @@ begin: } qdisc_bstats_update(sch, skb); flow->deficit -= qdisc_pkt_len(skb); - /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0, - * or HTB crashes. 
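The DRR and ETS hunks above (QFQ follows the same pattern further down) track active-list membership in the list node itself: classes are initialised with INIT_LIST_HEAD(), removed with list_del_init(), and tested with list_empty(), so a repeated or spurious deactivation can no longer corrupt the list. A compact userspace model with a minimal list implementation (struct cls and the helpers are local stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
        h->next = h->prev = h;
}

static bool list_empty(const struct list_head *h)
{
        return h->next == h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del_init(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        INIT_LIST_HEAD(n);
}

struct cls { struct list_head alist; };

static bool cl_is_active(struct cls *cl)
{
        return !list_empty(&cl->alist);
}

int main(void)
{
        struct list_head active;
        struct cls cl;

        INIT_LIST_HEAD(&active);
        INIT_LIST_HEAD(&cl.alist);              /* as done at class creation */
        printf("active after init:   %d\n", cl_is_active(&cl));
        list_add_tail(&cl.alist, &active);
        printf("active after add:    %d\n", cl_is_active(&cl));
        list_del_init(&cl.alist);               /* safe to repeat, unlike list_del() */
        list_del_init(&cl.alist);
        printf("active after remove: %d\n", cl_is_active(&cl));
        return 0;
}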
Defer it for next round. - */ - if (q->cstats.drop_count && sch->q.qlen) { + + if (q->cstats.drop_count) { qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len); q->cstats.drop_count = 0; @@ -433,7 +431,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt, while (sch->q.qlen > sch->limit || q->memory_usage > q->memory_limit) { - struct sk_buff *skb = fq_codel_dequeue(sch); + struct sk_buff *skb = qdisc_dequeue_internal(sch, false); q->cstats.drop_len += qdisc_pkt_len(skb); rtnl_kfree_skbs(skb, skb); diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index 68e6acd0f130..607c580d75e4 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -357,7 +357,7 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt, /* Drop excess packets if new limit is lower */ while (sch->q.qlen > sch->limit) { - struct sk_buff *skb = fq_pie_qdisc_dequeue(sch); + struct sk_buff *skb = qdisc_dequeue_internal(sch, false); len_dropped += qdisc_pkt_len(skb); num_dropped += 1; diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 872d127c9db4..fa7a1b69c0f3 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -913,7 +913,8 @@ static void gred_destroy(struct Qdisc *sch) for (i = 0; i < table->DPs; i++) gred_destroy_vq(table->tab[i]); - gred_offload(sch, TC_GRED_DESTROY); + if (table->opt) + gred_offload(sch, TC_GRED_DESTROY); kfree(table->opt); } diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index 54dddc2ff502..61b91de8065f 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c @@ -176,6 +176,11 @@ struct hfsc_sched { #define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */ +static bool cl_in_el_or_vttree(struct hfsc_class *cl) +{ + return ((cl->cl_flags & HFSC_FSC) && cl->cl_nactive) || + ((cl->cl_flags & HFSC_RSC) && !RB_EMPTY_NODE(&cl->el_node)); +} /* * eligible tree holds backlogged classes being sorted by their eligible times. @@ -204,7 +209,10 @@ eltree_insert(struct hfsc_class *cl) static inline void eltree_remove(struct hfsc_class *cl) { - rb_erase(&cl->el_node, &cl->sched->eligible); + if (!RB_EMPTY_NODE(&cl->el_node)) { + rb_erase(&cl->el_node, &cl->sched->eligible); + RB_CLEAR_NODE(&cl->el_node); + } } static inline void @@ -959,6 +967,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (cl != NULL) { int old_flags; + int len = 0; if (parentid) { if (cl->cl_parent && @@ -989,9 +998,13 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (usc != NULL) hfsc_change_usc(cl, usc, cur_time); + if (cl->qdisc->q.qlen != 0) + len = qdisc_peek_len(cl->qdisc); + /* Check queue length again since some qdisc implementations + * (e.g., netem/codel) might empty the queue during the peek + * operation. + */ if (cl->qdisc->q.qlen != 0) { - int len = qdisc_peek_len(cl->qdisc); - if (cl->cl_flags & HFSC_RSC) { if (old_flags & HFSC_RSC) update_ed(cl, len); @@ -1033,6 +1046,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid, if (cl == NULL) return -ENOBUFS; + RB_CLEAR_NODE(&cl->el_node); + err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); if (err) { kfree(cl); @@ -1217,7 +1232,8 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg) /* vttree is now handled in update_vf() so that update_vf(cl, 0, 0) * needs to be called explicitly to remove a class from vttree. 
*/ - update_vf(cl, 0, 0); + if (cl->cl_nactive) + update_vf(cl, 0, 0); if (cl->cl_flags & HFSC_RSC) eltree_remove(cl); } @@ -1559,7 +1575,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) return err; } - if (first) { + sch->qstats.backlog += len; + sch->q.qlen++; + + if (first && !cl_in_el_or_vttree(cl)) { if (cl->cl_flags & HFSC_RSC) init_ed(cl, len); if (cl->cl_flags & HFSC_FSC) @@ -1574,9 +1593,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) } - sch->qstats.backlog += len; - sch->q.qlen++; - return NET_XMIT_SUCCESS; } @@ -1631,10 +1647,16 @@ hfsc_dequeue(struct Qdisc *sch) if (cl->qdisc->q.qlen != 0) { /* update ed */ next_len = qdisc_peek_len(cl->qdisc); - if (realtime) - update_ed(cl, next_len); - else - update_d(cl, next_len); + /* Check queue length again since some qdisc implementations + * (e.g., netem/codel) might empty the queue during the peek + * operation. + */ + if (cl->qdisc->q.qlen != 0) { + if (realtime) + update_ed(cl, next_len); + else + update_d(cl, next_len); + } } else { /* the class becomes passive */ eltree_remove(cl); diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c index d26cd436cbe3..83fc44f20e31 100644 --- a/net/sched/sch_hhf.c +++ b/net/sched/sch_hhf.c @@ -560,7 +560,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt, qlen = sch->q.qlen; prev_backlog = sch->qstats.backlog; while (sch->q.qlen > sch->limit) { - struct sk_buff *skb = hhf_dequeue(sch); + struct sk_buff *skb = qdisc_dequeue_internal(sch, false); rtnl_kfree_skbs(skb, skb); } diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index d23f8ea63082..29f394fe3998 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -345,7 +345,8 @@ static void htb_add_to_wait_tree(struct htb_sched *q, */ static inline void htb_next_rb_node(struct rb_node **n) { - *n = rb_next(*n); + if (*n) + *n = rb_next(*n); } /** @@ -606,8 +607,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) */ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) { - WARN_ON(!cl->prio_activity); - + if (!cl->prio_activity) + return; htb_deactivate_prios(q, cl); cl->prio_activity = 0; } @@ -1733,8 +1734,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg, if (cl->parent) cl->parent->children--; - if (cl->prio_activity) - htb_deactivate(q, cl); + htb_deactivate(q, cl); if (cl->cmode != HTB_CAN_SEND) htb_safe_rb_erase(&cl->pq_node, @@ -1946,8 +1946,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, /* turn parent into inner node */ qdisc_purge_queue(parent->leaf.q); parent_qdisc = parent->leaf.q; - if (parent->prio_activity) - htb_deactivate(q, parent); + htb_deactivate(q, parent); /* remove from evt list because of level change */ if (parent->cmode != HTB_CAN_SEND) { diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 0eba06613dcd..cb38e58ee771 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -77,6 +77,8 @@ struct netem_sched_data { struct sk_buff *t_head; struct sk_buff *t_tail; + u32 t_len; + /* optional qdisc for classful handling (NULL at netem init) */ struct Qdisc *qdisc; @@ -373,6 +375,7 @@ static void tfifo_reset(struct Qdisc *sch) rtnl_kfree_skbs(q->t_head, q->t_tail); q->t_head = NULL; q->t_tail = NULL; + q->t_len = 0; } static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) @@ -402,6 +405,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) rb_link_node(&nskb->rbnode, parent, p); 
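The HFSC comments above note that qdisc_peek_len() on a child such as netem or codel can itself drop packets, so the queue length has to be re-read after the peek before the peeked length is used. A toy model of that re-check (struct toy_queue is invented for the sketch):

#include <stdio.h>

struct toy_queue {
        int qlen;
        int head_len;
        int drop_on_peek;       /* emulate a child that drops while being peeked */
};

static int toy_peek_len(struct toy_queue *q)
{
        if (q->drop_on_peek) {
                q->qlen = 0;    /* packet timed out / got dropped during the peek */
                return 0;
        }
        return q->head_len;
}

int main(void)
{
        struct toy_queue q = { .qlen = 1, .head_len = 100, .drop_on_peek = 1 };
        int len = 0;

        if (q.qlen != 0)
                len = toy_peek_len(&q);
        if (q.qlen != 0)        /* re-check: the peek may have emptied the queue */
                printf("schedule with len %d\n", len);
        else
                printf("queue drained during peek, nothing to schedule\n");
        return 0;
}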
rb_insert_color(&nskb->rbnode, &q->t_root); } + q->t_len++; sch->q.qlen++; } @@ -508,7 +512,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, 1<q.qlen >= sch->limit)) { + if (unlikely(q->t_len >= sch->limit)) { /* re-link segs, so that qdisc_drop_all() frees them all */ skb->next = segs; qdisc_drop_all(skb, sch, to_free); @@ -692,8 +696,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch) tfifo_dequeue: skb = __qdisc_dequeue_head(&sch->q); if (skb) { - qdisc_qstats_backlog_dec(sch, skb); deliver: + qdisc_qstats_backlog_dec(sch, skb); qdisc_bstats_update(sch, skb); return skb; } @@ -709,8 +713,7 @@ deliver: if (time_to_send <= now && q->slot.slot_next <= now) { netem_erase_head(q, skb); - sch->q.qlen--; - qdisc_qstats_backlog_dec(sch, skb); + q->t_len--; skb->next = NULL; skb->prev = NULL; /* skb->dev shares skb->rbnode area, @@ -736,17 +739,22 @@ deliver: if (err != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(err)) qdisc_qstats_drop(sch); + sch->qstats.backlog -= pkt_len; + sch->q.qlen--; qdisc_tree_reduce_backlog(sch, 1, pkt_len); } goto tfifo_dequeue; } + sch->q.qlen--; goto deliver; } if (q->qdisc) { skb = q->qdisc->ops->dequeue(q->qdisc); - if (skb) + if (skb) { + sch->q.qlen--; goto deliver; + } } qdisc_watchdog_schedule_ns(&q->watchdog, @@ -756,8 +764,10 @@ deliver: if (q->qdisc) { skb = q->qdisc->ops->dequeue(q->qdisc); - if (skb) + if (skb) { + sch->q.qlen--; goto deliver; + } } return NULL; } diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index b60b31ef71cc..e1bb151a9719 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -190,7 +190,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt, /* Drop excess packets if new limit is lower */ qlen = sch->q.qlen; while (sch->q.qlen > sch->limit) { - struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); + struct sk_buff *skb = qdisc_dequeue_internal(sch, true); dropped += qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb); diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index ed01634af82c..6462468bf77c 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -204,6 +204,11 @@ struct qfq_sched { */ enum update_reason {enqueue, requeue}; +static bool cl_is_active(struct qfq_class *cl) +{ + return !list_empty(&cl->alist); +} + static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) { struct qfq_sched *q = qdisc_priv(sch); @@ -349,7 +354,7 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl) struct qfq_aggregate *agg = cl->agg; - list_del(&cl->alist); /* remove from RR queue of the aggregate */ + list_del_init(&cl->alist); /* remove from RR queue of the aggregate */ if (list_empty(&agg->active)) /* agg is now inactive */ qfq_deactivate_agg(q, agg); } @@ -478,6 +483,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, gnet_stats_basic_sync_init(&cl->bstats); cl->common.classid = classid; cl->deficit = lmax; + INIT_LIST_HEAD(&cl->alist); cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid, NULL); @@ -984,7 +990,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg, cl->deficit -= (int) len; if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */ - list_del(&cl->alist); + list_del_init(&cl->alist); else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) { cl->deficit += agg->lmax; list_move_tail(&cl->alist, &agg->active); @@ -1216,7 +1222,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct qfq_class *cl; struct qfq_aggregate 
*agg; int err = 0; - bool first; cl = qfq_classify(skb, sch, &err); if (cl == NULL) { @@ -1238,7 +1243,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, } gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; - first = !cl->qdisc->q.qlen; err = qdisc_enqueue(skb, cl->qdisc, to_free); if (unlikely(err != NET_XMIT_SUCCESS)) { pr_debug("qfq_enqueue: enqueue failed %d\n", err); @@ -1254,8 +1258,8 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, ++sch->q.qlen; agg = cl->agg; - /* if the queue was not empty, then done here */ - if (!first) { + /* if the class is active, then done here */ + if (cl_is_active(cl)) { if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) && list_first_entry(&agg->active, struct qfq_class, alist) == cl && cl->deficit < len) @@ -1417,6 +1421,8 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) struct qfq_sched *q = qdisc_priv(sch); struct qfq_class *cl = (struct qfq_class *)arg; + if (list_empty(&cl->alist)) + return; qfq_deactivate_class(q, cl); } diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 66dcb18638fe..002941d35b64 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -77,12 +77,6 @@ #define SFQ_EMPTY_SLOT 0xffff #define SFQ_DEFAULT_HASH_DIVISOR 1024 -/* We use 16 bits to store allot, and want to handle packets up to 64K - * Scale allot by 8 (1<<3) so that no overflow occurs. - */ -#define SFQ_ALLOT_SHIFT 3 -#define SFQ_ALLOT_SIZE(X) DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT) - /* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */ typedef u16 sfq_index; @@ -104,7 +98,7 @@ struct sfq_slot { sfq_index next; /* next slot in sfq RR chain */ struct sfq_head dep; /* anchor in dep[] chains */ unsigned short hash; /* hash value (index in ht[]) */ - short allot; /* credit for this slot */ + int allot; /* credit for this slot */ unsigned int backlog; struct red_vars vars; @@ -120,7 +114,6 @@ struct sfq_sched_data { siphash_key_t perturbation; u8 cur_depth; /* depth of longest slot */ u8 flags; - unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ struct tcf_proto __rcu *filter_list; struct tcf_block *block; sfq_index *ht; /* Hash table ('divisor' slots) */ @@ -456,7 +449,7 @@ enqueue: */ q->tail = slot; /* We could use a bigger initial quantum for new flows */ - slot->allot = q->scaled_quantum; + slot->allot = q->quantum; } if (++sch->q.qlen <= q->limit) return NET_XMIT_SUCCESS; @@ -493,7 +486,7 @@ next_slot: slot = &q->slots[a]; if (slot->allot <= 0) { q->tail = slot; - slot->allot += q->scaled_quantum; + slot->allot += q->quantum; goto next_slot; } skb = slot_dequeue_head(slot); @@ -512,7 +505,7 @@ next_slot: } q->tail->next = next_a; } else { - slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb)); + slot->allot -= qdisc_pkt_len(skb); } return skb; } @@ -595,7 +588,7 @@ drop: q->tail->next = x; } q->tail = slot; - slot->allot = q->scaled_quantum; + slot->allot = q->quantum; } } sch->q.qlen -= dropped; @@ -608,6 +601,7 @@ static void sfq_perturbation(struct timer_list *t) struct Qdisc *sch = q->sch; spinlock_t *root_lock; siphash_key_t nkey; + int period; get_random_bytes(&nkey, sizeof(nkey)); rcu_read_lock(); @@ -618,12 +612,17 @@ static void sfq_perturbation(struct timer_list *t) sfq_rehash(sch); spin_unlock(root_lock); - if (q->perturb_period) - mod_timer(&q->perturb_timer, jiffies + q->perturb_period); + /* q->perturb_period can change under us from + * sfq_change() and sfq_destroy(). 
+ */ + period = READ_ONCE(q->perturb_period); + if (period) + mod_timer(&q->perturb_timer, jiffies + period); rcu_read_unlock(); } -static int sfq_change(struct Qdisc *sch, struct nlattr *opt) +static int sfq_change(struct Qdisc *sch, struct nlattr *opt, + struct netlink_ext_ack *extack) { struct sfq_sched_data *q = qdisc_priv(sch); struct tc_sfq_qopt *ctl = nla_data(opt); @@ -632,6 +631,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) struct red_parms *p = NULL; struct sk_buff *to_free = NULL; struct sk_buff *tail = NULL; + unsigned int maxflows; + unsigned int quantum; + unsigned int divisor; + int perturb_period; + u8 headdrop; + u8 maxdepth; + int limit; + u8 flags; + if (opt->nla_len < nla_attr_size(sizeof(*ctl))) return -EINVAL; @@ -641,14 +649,10 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536)) return -EINVAL; - /* slot->allot is a short, make sure quantum is not too big. */ - if (ctl->quantum) { - unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum); - - if (scaled <= 0 || scaled > SHRT_MAX) - return -EINVAL; + if ((int)ctl->quantum < 0) { + NL_SET_ERR_MSG_MOD(extack, "invalid quantum"); + return -EINVAL; } - if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max, ctl_v1->Wlog, ctl_v1->Scell_log, NULL)) return -EINVAL; @@ -657,37 +661,64 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt) if (!p) return -ENOMEM; } + sch_tree_lock(sch); - if (ctl->quantum) { - q->quantum = ctl->quantum; - q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); - } - q->perturb_period = ctl->perturb_period * HZ; + + limit = q->limit; + divisor = q->divisor; + headdrop = q->headdrop; + maxdepth = q->maxdepth; + maxflows = q->maxflows; + perturb_period = q->perturb_period; + quantum = q->quantum; + flags = q->flags; + + /* update and validate configuration */ + if (ctl->quantum) + quantum = ctl->quantum; + perturb_period = ctl->perturb_period * HZ; if (ctl->flows) - q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); + maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); if (ctl->divisor) { - q->divisor = ctl->divisor; - q->maxflows = min_t(u32, q->maxflows, q->divisor); + divisor = ctl->divisor; + maxflows = min_t(u32, maxflows, divisor); } if (ctl_v1) { if (ctl_v1->depth) - q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH); + maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH); if (p) { - swap(q->red_parms, p); - red_set_parms(q->red_parms, + red_set_parms(p, ctl_v1->qth_min, ctl_v1->qth_max, ctl_v1->Wlog, ctl_v1->Plog, ctl_v1->Scell_log, NULL, ctl_v1->max_P); } - q->flags = ctl_v1->flags; - q->headdrop = ctl_v1->headdrop; + flags = ctl_v1->flags; + headdrop = ctl_v1->headdrop; } if (ctl->limit) { - q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows); - q->maxflows = min_t(u32, q->maxflows, q->limit); + limit = min_t(u32, ctl->limit, maxdepth * maxflows); + maxflows = min_t(u32, maxflows, limit); } + if (limit == 1) { + sch_tree_unlock(sch); + kfree(p); + NL_SET_ERR_MSG_MOD(extack, "invalid limit"); + return -EINVAL; + } + + /* commit configuration */ + q->limit = limit; + q->divisor = divisor; + q->headdrop = headdrop; + q->maxdepth = maxdepth; + q->maxflows = maxflows; + WRITE_ONCE(q->perturb_period, perturb_period); + q->quantum = quantum; + q->flags = flags; + if (p) + swap(q->red_parms, p); qlen = sch->q.qlen; while (sch->q.qlen > q->limit) { @@ -724,7 +755,7 @@ static void sfq_destroy(struct Qdisc *sch) struct sfq_sched_data *q = qdisc_priv(sch); tcf_block_put(q->block); - 
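The sfq_change() rework above stages every requested value in locals, validates the combined result (including the new limit == 1 rejection), and only then writes it back, so a rejected request leaves the running configuration untouched. A reduced userspace sketch of that stage-validate-commit flow (struct sfq_cfg and the clamping are simplified stand-ins):

#include <errno.h>
#include <stdio.h>

struct sfq_cfg {
        unsigned int limit;
        unsigned int maxflows;
        unsigned int maxdepth;
};

static int sfq_cfg_change(struct sfq_cfg *cur, unsigned int new_limit)
{
        struct sfq_cfg c = *cur;                /* stage */

        if (new_limit)
                c.limit = new_limit < c.maxdepth * c.maxflows ?
                          new_limit : c.maxdepth * c.maxflows;
        c.maxflows = c.maxflows < c.limit ? c.maxflows : c.limit;

        if (c.limit == 1)                       /* the new sanity check */
                return -EINVAL;

        *cur = c;                               /* commit */
        return 0;
}

int main(void)
{
        struct sfq_cfg cfg = { .limit = 127, .maxflows = 128, .maxdepth = 127 };

        printf("set limit 64: %d (limit now %u)\n",
               sfq_cfg_change(&cfg, 64), cfg.limit);
        printf("set limit 1:  %d (limit still %u)\n",
               sfq_cfg_change(&cfg, 1), cfg.limit);
        return 0;
}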
q->perturb_period = 0; + WRITE_ONCE(q->perturb_period, 0); del_timer_sync(&q->perturb_timer); sfq_free(q->ht); sfq_free(q->slots); @@ -757,12 +788,11 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt, q->divisor = SFQ_DEFAULT_HASH_DIVISOR; q->maxflows = SFQ_DEFAULT_FLOWS; q->quantum = psched_mtu(qdisc_dev(sch)); - q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); q->perturb_period = 0; get_random_bytes(&q->perturbation, sizeof(q->perturbation)); if (opt) { - int err = sfq_change(sch, opt); + int err = sfq_change(sch, opt, extack); if (err) return err; } @@ -873,7 +903,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl, if (idx != SFQ_EMPTY_SLOT) { const struct sfq_slot *slot = &q->slots[idx]; - xstats.allot = slot->allot << SFQ_ALLOT_SHIFT; + xstats.allot = slot->allot; qs.qlen = slot->qlen; qs.backlog = slot->backlog; } diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c index 5df2dacb7b1a..05aa363a7fee 100644 --- a/net/sched/sch_skbprio.c +++ b/net/sched/sch_skbprio.c @@ -121,8 +121,6 @@ static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, /* Check to update highest and lowest priorities. */ if (skb_queue_empty(lp_qdisc)) { if (q->lowest_prio == q->highest_prio) { - /* The incoming packet is the only packet in queue. */ - BUG_ON(sch->q.qlen != 1); q->lowest_prio = prio; q->highest_prio = prio; } else { @@ -154,7 +152,6 @@ static struct sk_buff *skbprio_dequeue(struct Qdisc *sch) /* Update highest priority field. */ if (skb_queue_empty(hpq)) { if (q->lowest_prio == q->highest_prio) { - BUG_ON(sch->q.qlen); q->highest_prio = 0; q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; } else { diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 1d5cdc987abd..62219f23f76a 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -915,11 +915,6 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb, list_for_each_entry(entry, &new->entries, list) cycle = ktime_add_ns(cycle, entry->interval); - if (!cycle) { - NL_SET_ERR_MSG(extack, "'cycle_time' can never be 0"); - return -EINVAL; - } - if (cycle < 0 || cycle > INT_MAX) { NL_SET_ERR_MSG(extack, "'cycle_time' is too big"); return -EINVAL; @@ -928,6 +923,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb, new->cycle_time = cycle; } + if (new->cycle_time < new->num_entries * length_to_duration(q, ETH_ZLEN)) { + NL_SET_ERR_MSG(extack, "'cycle_time' is too small"); + return -EINVAL; + } + return 0; } diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 277ad11f4d61..c4c91b55e98b 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c @@ -207,7 +207,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, struct tbf_sched_data *q = qdisc_priv(sch); struct sk_buff *segs, *nskb; netdev_features_t features = netif_skb_features(skb); - unsigned int len = 0, prev_len = qdisc_pkt_len(skb); + unsigned int len = 0, prev_len = qdisc_pkt_len(skb), seg_len; int ret, nb; segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); @@ -218,21 +218,27 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, nb = 0; skb_list_walk_safe(segs, segs, nskb) { skb_mark_not_on_list(segs); - qdisc_skb_cb(segs)->pkt_len = segs->len; - len += segs->len; + seg_len = segs->len; + qdisc_skb_cb(segs)->pkt_len = seg_len; ret = qdisc_enqueue(segs, q->qdisc, to_free); if (ret != NET_XMIT_SUCCESS) { if (net_xmit_drop_count(ret)) qdisc_qstats_drop(sch); } else { nb++; + len += seg_len; } } sch->q.qlen += nb; - if (nb > 1) + 
sch->qstats.backlog += len; + if (nb > 0) { qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len); - consume_skb(skb); - return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP; + consume_skb(skb); + return NET_XMIT_SUCCESS; + } + + kfree_skb(skb); + return NET_XMIT_DROP; } static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 2965a12fe8aa..8b97b13d4c2f 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -137,7 +137,8 @@ static struct sctp_association *sctp_association_init( = 5 * asoc->rto_max; asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay; - asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ; + asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = + (unsigned long)sp->autoclose * HZ; /* Initializes the timers */ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index bcd3384ab07a..036dc574af4f 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -497,9 +497,7 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, continue; fl4->fl4_sport = laddr->a.v4.sin_port; - flowi4_update_output(fl4, - asoc->base.sk->sk_bound_dev_if, - RT_CONN_FLAGS_TOS(asoc->base.sk, tos), + flowi4_update_output(fl4, asoc->base.sk->sk_bound_dev_if, daddr->v4.sin_addr.s_addr, laddr->a.v4.sin_addr.s_addr); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 0b201cd811a1..65162d67c3a3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -70,8 +70,9 @@ /* Forward declarations for internal helper functions. */ static bool sctp_writeable(const struct sock *sk); static void sctp_wfree(struct sk_buff *skb); -static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, - size_t msg_len); +static int sctp_wait_for_sndbuf(struct sctp_association *asoc, + struct sctp_transport *transport, + long *timeo_p, size_t msg_len); static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); static int sctp_wait_for_accept(struct sock *sk, long timeo); @@ -1826,7 +1827,7 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc, if (sctp_wspace(asoc) <= 0 || !sk_wmem_schedule(sk, msg_len)) { timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); - err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); + err = sctp_wait_for_sndbuf(asoc, transport, &timeo, msg_len); if (err) goto err; if (unlikely(sinfo->sinfo_stream >= asoc->stream.outcnt)) { @@ -9203,8 +9204,9 @@ void sctp_sock_rfree(struct sk_buff *skb) /* Helper function to wait for space in the sndbuf. */ -static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, - size_t msg_len) +static int sctp_wait_for_sndbuf(struct sctp_association *asoc, + struct sctp_transport *transport, + long *timeo_p, size_t msg_len) { struct sock *sk = asoc->base.sk; long current_timeo = *timeo_p; @@ -9214,7 +9216,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, *timeo_p, msg_len); - /* Increment the association's refcnt. */ + /* Increment the transport and association's refcnt. */ + if (transport) + sctp_transport_hold(transport); sctp_association_hold(asoc); /* Wait on the association specific sndbuf space. 
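The TBF segmentation fix above only counts bytes for segments that were actually enqueued and then feeds the packet and byte deltas to qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len). A userspace sketch of that accounting, with segment drops randomised purely for illustration:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        unsigned int prev_len = 6000;   /* original GSO packet length */
        unsigned int seg_len = 1500;    /* each of the 4 segments */
        unsigned int len = 0;
        int nb = 0, i;

        srand(1);
        for (i = 0; i < 4; i++) {
                if (rand() % 4 == 0)    /* pretend this segment was dropped */
                        continue;
                nb++;
                len += seg_len;         /* count only enqueued segments */
        }

        if (nb > 0)
                printf("qdisc_tree_reduce_backlog(sch, %d, %u)\n",
                       1 - nb, prev_len - len);
        else
                printf("no segment enqueued: report the whole packet as dropped\n");
        return 0;
}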
*/ @@ -9223,7 +9227,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, TASK_INTERRUPTIBLE); if (asoc->base.dead) goto do_dead; - if (!*timeo_p) + if ((!*timeo_p) || (transport && transport->dead)) goto do_nonblock; if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING) goto do_error; @@ -9248,7 +9252,9 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, out: finish_wait(&asoc->wait, &wait); - /* Release the association's refcnt. */ + /* Release the transport and association's refcnt. */ + if (transport) + sctp_transport_put(transport); sctp_association_put(asoc); return err; diff --git a/net/sctp/stream.c b/net/sctp/stream.c index ee6514af830f..0527728aee98 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -735,7 +735,7 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( * value SHOULD be the smallest TSN not acknowledged by the * receiver of the request plus 2^31. */ - init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31); + init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1U << 31); sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, init_tsn, GFP_ATOMIC); diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 43ebf090029d..f3d09998c24d 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -380,7 +380,8 @@ static struct ctl_table sctp_net_table[] = { static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { - struct net *net = current->nsproxy->net_ns; + struct net *net = container_of(ctl->data, struct net, + sctp.sctp_hmac_alg); struct ctl_table tbl; bool changed = false; char *none = "none"; @@ -425,7 +426,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { - struct net *net = current->nsproxy->net_ns; + struct net *net = container_of(ctl->data, struct net, sctp.rto_min); unsigned int min = *(unsigned int *) ctl->extra1; unsigned int max = *(unsigned int *) ctl->extra2; struct ctl_table tbl; @@ -453,7 +454,7 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { - struct net *net = current->nsproxy->net_ns; + struct net *net = container_of(ctl->data, struct net, sctp.rto_max); unsigned int min = *(unsigned int *) ctl->extra1; unsigned int max = *(unsigned int *) ctl->extra2; struct ctl_table tbl; @@ -491,7 +492,7 @@ static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write, static int proc_sctp_do_auth(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { - struct net *net = current->nsproxy->net_ns; + struct net *net = container_of(ctl->data, struct net, sctp.auth_enable); struct ctl_table tbl; int new_value, ret; @@ -517,10 +518,12 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write, return ret; } +static DEFINE_MUTEX(sctp_sysctl_mutex); + static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { - struct net *net = current->nsproxy->net_ns; + struct net *net = container_of(ctl->data, struct net, sctp.udp_port); unsigned int min = *(unsigned int *)ctl->extra1; unsigned int max = *(unsigned int *)ctl->extra2; struct ctl_table tbl; @@ -541,6 +544,7 @@ static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, if (new_value > max || new_value < min) return -EINVAL; + 
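The SCTP sysctl handlers above stop trusting current->nsproxy and instead recover the owning struct net from ctl->data with container_of(), since ctl->data already points into the correct namespace's SCTP state. A userspace model of that recovery (the struct layouts below are illustrative stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct netns_sctp {
        int rto_min;
        int rto_max;
};

struct net {
        int id;
        struct netns_sctp sctp;
};

int main(void)
{
        struct net init_net = { .id = 1, .sctp = { .rto_min = 1000, .rto_max = 60000 } };
        int *data = &init_net.sctp.rto_max;     /* what the ctl_table carries */
        struct net *net = container_of(data, struct net, sctp.rto_max);

        printf("recovered net id %d, rto_max %d\n", net->id, net->sctp.rto_max);
        return 0;
}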
mutex_lock(&sctp_sysctl_mutex); net->sctp.udp_port = new_value; sctp_udp_sock_stop(net); if (new_value) { @@ -553,6 +557,7 @@ static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, lock_sock(sk); sctp_sk(sk)->udp_port = htons(net->sctp.udp_port); release_sock(sk); + mutex_unlock(&sctp_sysctl_mutex); } return ret; @@ -561,7 +566,8 @@ static int proc_sctp_do_udp_port(struct ctl_table *ctl, int write, static int proc_sctp_do_probe_interval(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos) { - struct net *net = current->nsproxy->net_ns; + struct net *net = container_of(ctl->data, struct net, + sctp.probe_interval); struct ctl_table tbl; int ret, new_value; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 2990365c2f2c..87ed33b9db1b 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -117,6 +117,8 @@ fail: */ void sctp_transport_free(struct sctp_transport *transport) { + transport->dead = 1; + /* Try to delete the heartbeat timer. */ if (del_timer(&transport->hb_timer)) sctp_transport_put(transport); diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index e86db21fef6e..c951e5c483b5 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -1860,6 +1860,7 @@ static void smc_listen_out(struct smc_sock *new_smc) if (tcp_sk(new_smc->clcsock->sk)->syn_smc) atomic_dec(&lsmc->queued_smc_hs); + release_sock(newsmcsk); /* lock in smc_listen_work() */ if (lsmc->sk.sk_state == SMC_LISTEN) { lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING); smc_accept_enqueue(&lsmc->sk, newsmcsk); @@ -1987,6 +1988,8 @@ static int smc_listen_prfx_check(struct smc_sock *new_smc, if (pclc->hdr.typev1 == SMC_TYPE_N) return 0; pclc_prfx = smc_clc_proposal_get_prefix(pclc); + if (!pclc_prfx) + return -EPROTO; if (smc_clc_prfx_match(newclcsock, pclc_prfx)) return SMC_CLC_DECL_DIFFPREFIX; @@ -2093,6 +2096,8 @@ static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc, pclc_smcd = smc_get_clc_msg_smcd(pclc); smc_v2_ext = smc_get_clc_v2_ext(pclc); smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext); + if (!pclc_smcd || !smc_v2_ext || !smcd_v2_ext) + goto not_found; mutex_lock(&smcd_dev_list.mutex); if (pclc_smcd->ism.chid) @@ -2152,7 +2157,9 @@ static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc, int rc = 0; /* check if ISM V1 is available */ - if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1)) + if (!(ini->smcd_version & SMC_V1) || + !smcd_indicated(ini->smc_type_v1) || + !pclc_smcd) goto not_found; ini->is_smcd = true; /* prepare ISM check */ ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid); @@ -2352,6 +2359,7 @@ static void smc_listen_work(struct work_struct *work) u8 accept_version; int rc = 0; + lock_sock(&new_smc->sk); /* release in smc_listen_out() */ if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN) return smc_listen_out_err(new_smc); @@ -2637,7 +2645,7 @@ static int smc_accept(struct socket *sock, struct socket *new_sock, release_sock(clcsk); } else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) { lock_sock(nsk); - smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available); + smc_rx_wait(smc_sk(nsk), &timeo, 0, smc_rx_data_available); release_sock(nsk); } } @@ -2787,6 +2795,13 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, } else { sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); + + if (sk->sk_state != SMC_INIT) { + /* Race breaker the same way as tcp_poll(). 
*/ + smp_mb__after_atomic(); + if (atomic_read(&smc->conn.sndbuf_space)) + mask |= EPOLLOUT | EPOLLWRNORM; + } } if (atomic_read(&smc->conn.bytes_to_rcv)) mask |= EPOLLIN | EPOLLRDNORM; diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 867df4522815..a48fdc83fe6b 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c @@ -354,6 +354,10 @@ static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc) v2_ext = smc_get_clc_v2_ext(pclc); pclc_prfx = smc_clc_proposal_get_prefix(pclc); + if (!pclc_prfx || + pclc_prfx->ipv6_prefixes_cnt > SMC_CLC_MAX_V6_PREFIX) + return false; + if (hdr->version == SMC_V1) { if (hdr->typev1 == SMC_TYPE_N) return false; @@ -749,6 +753,11 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen, SMC_CLC_RECV_BUF_LEN : datlen; iov_iter_kvec(&msg.msg_iter, ITER_DEST, &vec, 1, recvlen); len = sock_recvmsg(smc->clcsock, &msg, krflags); + if (len < recvlen) { + smc->sk.sk_err = EPROTO; + reason_code = -EPROTO; + goto out; + } datlen -= len; } if (clcm->type == SMC_CLC_DECLINE) { diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h index 5fee545c9a10..0f6102cd5de1 100644 --- a/net/smc/smc_clc.h +++ b/net/smc/smc_clc.h @@ -303,8 +303,12 @@ struct smc_clc_msg_decline_v2 { /* clc decline message */ static inline struct smc_clc_msg_proposal_prefix * smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc) { + u16 offset = ntohs(pclc->iparea_offset); + + if (offset > sizeof(struct smc_clc_msg_smcd)) + return NULL; return (struct smc_clc_msg_proposal_prefix *) - ((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset)); + ((u8 *)pclc + sizeof(*pclc) + offset); } static inline bool smcr_indicated(int smc_type) @@ -357,9 +361,15 @@ smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop) static inline struct smc_clc_smcd_v2_extension * smc_get_clc_smcd_v2_ext(struct smc_clc_v2_extension *prop_v2ext) { + u16 max_offset = offsetof(struct smc_clc_msg_proposal_area, pclc_smcd_v2_ext) - + offsetof(struct smc_clc_msg_proposal_area, pclc_v2_ext) - + offsetof(struct smc_clc_v2_extension, hdr) - + offsetofend(struct smc_clnt_opts_area_hdr, smcd_v2_ext_offset); + if (!prop_v2ext) return NULL; - if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset)) + if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) || + ntohs(prop_v2ext->hdr.smcd_v2_ext_offset) > max_offset) return NULL; return (struct smc_clc_smcd_v2_extension *) diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 10d79cb55528..890785d4f6b6 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -1726,7 +1726,9 @@ void smcr_link_down_cond_sched(struct smc_link *lnk) { if (smc_link_downing(&lnk->state)) { trace_smcr_link_down(lnk, __builtin_return_address(0)); - schedule_work(&lnk->link_down_wrk); + smcr_link_hold(lnk); /* smcr_link_put in link_down_wrk */ + if (!schedule_work(&lnk->link_down_wrk)) + smcr_link_put(lnk); } } @@ -1758,11 +1760,14 @@ static void smc_link_down_work(struct work_struct *work) struct smc_link_group *lgr = link->lgr; if (list_empty(&lgr->list)) - return; + goto out; wake_up_all(&lgr->llc_msg_waiter); mutex_lock(&lgr->llc_conf_mutex); smcr_link_down(link); mutex_unlock(&lgr->llc_conf_mutex); + +out: + smcr_link_put(link); /* smcr_link_hold by schedulers of link_down_work */ } static int smc_vlan_by_tcpsk_walk(struct net_device *lower_dev, diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c index 7a907186a33a..2d1cd033398b 100644 --- a/net/smc/smc_diag.c +++ b/net/smc/smc_diag.c @@ -249,6 +249,7 @@ static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) } static const 
struct sock_diag_handler smc_diag_handler = { + .owner = THIS_MODULE, .family = AF_SMC, .dump = smc_diag_handler_dump, }; diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 399314cfab90..af12e0215274 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c @@ -1080,14 +1080,16 @@ static void smc_pnet_find_roce_by_pnetid(struct net_device *ndev, struct smc_init_info *ini) { u8 ndev_pnetid[SMC_MAX_PNETID_LEN]; + struct net_device *base_ndev; struct net *net; - ndev = pnet_find_base_ndev(ndev); + base_ndev = pnet_find_base_ndev(ndev); net = dev_net(ndev); - if (smc_pnetid_by_dev_port(ndev->dev.parent, ndev->dev_port, + if (smc_pnetid_by_dev_port(base_ndev->dev.parent, base_ndev->dev_port, ndev_pnetid) && + smc_pnet_find_ndev_pnetid_by_table(base_ndev, ndev_pnetid) && smc_pnet_find_ndev_pnetid_by_table(ndev, ndev_pnetid)) { - smc_pnet_find_rdma_dev(ndev, ini); + smc_pnet_find_rdma_dev(base_ndev, ini); return; /* pnetid could not be determined */ } _smc_pnet_find_roce_by_pnetid(ndev_pnetid, ini, NULL, net); diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c index ffcc9996a3da..e57002d2ac37 100644 --- a/net/smc/smc_rx.c +++ b/net/smc/smc_rx.c @@ -234,22 +234,23 @@ out: return -ENOMEM; } -static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn) +static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn, size_t peeked) { - return atomic_read(&conn->bytes_to_rcv) && + return smc_rx_data_available(conn, peeked) && !atomic_read(&conn->splice_pending); } /* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted * @smc smc socket * @timeo pointer to max seconds to wait, pointer to value 0 for no timeout + * @peeked number of bytes already peeked * @fcrit add'l criterion to evaluate as function pointer * Returns: * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown. * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted). 
*/ -int smc_rx_wait(struct smc_sock *smc, long *timeo, - int (*fcrit)(struct smc_connection *conn)) +int smc_rx_wait(struct smc_sock *smc, long *timeo, size_t peeked, + int (*fcrit)(struct smc_connection *conn, size_t baseline)) { DEFINE_WAIT_FUNC(wait, woken_wake_function); struct smc_connection *conn = &smc->conn; @@ -258,7 +259,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo, struct sock *sk = &smc->sk; int rc; - if (fcrit(conn)) + if (fcrit(conn, peeked)) return 1; sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); add_wait_queue(sk_sleep(sk), &wait); @@ -267,7 +268,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo, cflags->peer_conn_abort || READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN || conn->killed || - fcrit(conn), + fcrit(conn, peeked), &wait); remove_wait_queue(sk_sleep(sk), &wait); sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); @@ -318,11 +319,11 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len, return -EAGAIN; } -static bool smc_rx_recvmsg_data_available(struct smc_sock *smc) +static bool smc_rx_recvmsg_data_available(struct smc_sock *smc, size_t peeked) { struct smc_connection *conn = &smc->conn; - if (smc_rx_data_available(conn)) + if (smc_rx_data_available(conn, peeked)) return true; else if (conn->urg_state == SMC_URG_VALID) /* we received a single urgent Byte - skip */ @@ -340,10 +341,10 @@ static bool smc_rx_recvmsg_data_available(struct smc_sock *smc) int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, struct pipe_inode_info *pipe, size_t len, int flags) { - size_t copylen, read_done = 0, read_remaining = len; + size_t copylen, read_done = 0, read_remaining = len, peeked_bytes = 0; size_t chunk_len, chunk_off, chunk_len_sum; struct smc_connection *conn = &smc->conn; - int (*func)(struct smc_connection *conn); + int (*func)(struct smc_connection *conn, size_t baseline); union smc_host_cursor cons; int readable, chunk; char *rcvbuf_base; @@ -380,14 +381,14 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, if (conn->killed) break; - if (smc_rx_recvmsg_data_available(smc)) + if (smc_rx_recvmsg_data_available(smc, peeked_bytes)) goto copy; if (sk->sk_shutdown & RCV_SHUTDOWN) { /* smc_cdc_msg_recv_action() could have run after * above smc_rx_recvmsg_data_available() */ - if (smc_rx_recvmsg_data_available(smc)) + if (smc_rx_recvmsg_data_available(smc, peeked_bytes)) goto copy; break; } @@ -421,26 +422,28 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, } } - if (!smc_rx_data_available(conn)) { - smc_rx_wait(smc, &timeo, smc_rx_data_available); + if (!smc_rx_data_available(conn, peeked_bytes)) { + smc_rx_wait(smc, &timeo, peeked_bytes, smc_rx_data_available); continue; } copy: /* initialize variables for 1st iteration of subsequent loop */ /* could be just 1 byte, even after waiting on data above */ - readable = atomic_read(&conn->bytes_to_rcv); + readable = smc_rx_data_available(conn, peeked_bytes); splbytes = atomic_read(&conn->splice_pending); if (!readable || (msg && splbytes)) { if (splbytes) func = smc_rx_data_available_and_no_splice_pend; else func = smc_rx_data_available; - smc_rx_wait(smc, &timeo, func); + smc_rx_wait(smc, &timeo, peeked_bytes, func); continue; } smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn); + if ((flags & MSG_PEEK) && peeked_bytes) + smc_curs_add(conn->rmb_desc->len, &cons, peeked_bytes); /* subsequent splice() calls pick up where previous left */ if (splbytes) smc_curs_add(conn->rmb_desc->len, &cons, splbytes); @@ -476,6 +479,8 @@ copy: } read_remaining -= chunk_len; read_done += 
chunk_len; + if (flags & MSG_PEEK) + peeked_bytes += chunk_len; if (chunk_len_sum == copylen) break; /* either on 1st or 2nd iteration */ diff --git a/net/smc/smc_rx.h b/net/smc/smc_rx.h index db823c97d824..994f5e42d1ba 100644 --- a/net/smc/smc_rx.h +++ b/net/smc/smc_rx.h @@ -21,11 +21,11 @@ void smc_rx_init(struct smc_sock *smc); int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg, struct pipe_inode_info *pipe, size_t len, int flags); -int smc_rx_wait(struct smc_sock *smc, long *timeo, - int (*fcrit)(struct smc_connection *conn)); -static inline int smc_rx_data_available(struct smc_connection *conn) +int smc_rx_wait(struct smc_sock *smc, long *timeo, size_t peeked, + int (*fcrit)(struct smc_connection *conn, size_t baseline)); +static inline int smc_rx_data_available(struct smc_connection *conn, size_t peeked) { - return atomic_read(&conn->bytes_to_rcv); + return atomic_read(&conn->bytes_to_rcv) - peeked; } #endif /* SMC_RX_H */ diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 8299ceb3e373..95696f42647e 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -347,7 +347,10 @@ static int strp_read_sock(struct strparser *strp) struct socket *sock = strp->sk->sk_socket; read_descriptor_t desc; - if (unlikely(!sock || !sock->ops || !sock->ops->read_sock)) + if (unlikely(!sock || !sock->ops)) + return -EBUSY; + + if (unlikely(!strp->cb.read_sock && !sock->ops->read_sock)) return -EBUSY; desc.arg.data = strp; @@ -355,7 +358,10 @@ static int strp_read_sock(struct strparser *strp) desc.count = 1; /* give more than one skb per call */ /* sk should be locked here, so okay to do read_sock */ - sock->ops->read_sock(strp->sk, &desc, strp_recv); + if (strp->cb.read_sock) + strp->cb.read_sock(strp, &desc, strp_recv); + else + sock->ops->read_sock(strp->sk, &desc, strp_recv); desc.error = strp->cb.read_sock_done(strp, desc.error); @@ -468,6 +474,7 @@ int strp_init(struct strparser *strp, struct sock *sk, strp->cb.unlock = cb->unlock ? : strp_sock_unlock; strp->cb.rcv_msg = cb->rcv_msg; strp->cb.parse_msg = cb->parse_msg; + strp->cb.read_sock = cb->read_sock; strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done; strp->cb.abort_parser = cb->abort_parser ? 
: strp_abort_strp; diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index f075a9fb5ccc..7ac4648c7da7 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -1431,7 +1431,9 @@ static int c_show(struct seq_file *m, void *p) seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n", convert_to_wallclock(cp->expiry_time), kref_read(&cp->ref), cp->flags); - cache_get(cp); + if (!cache_get_rcu(cp)) + return 0; + if (cache_check(cd, cp, NULL)) /* cache_check does a cache_put on failure */ seq_puts(m, "# "); @@ -1673,12 +1675,14 @@ static void remove_cache_proc_entries(struct cache_detail *cd) } } -#ifdef CONFIG_PROC_FS static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) { struct proc_dir_entry *p; struct sunrpc_net *sn; + if (!IS_ENABLED(CONFIG_PROC_FS)) + return 0; + sn = net_generic(net, sunrpc_net_id); cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc); if (cd->procfs == NULL) @@ -1706,12 +1710,6 @@ out_nomem: remove_cache_proc_entries(cd); return -ENOMEM; } -#else /* CONFIG_PROC_FS */ -static int create_cache_proc_entries(struct cache_detail *cd, struct net *net) -{ - return 0; -} -#endif void __init cache_initialize(void) { diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index b6529a9d37d3..a390a4e5592f 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -275,9 +275,6 @@ static struct rpc_xprt *rpc_clnt_set_transport(struct rpc_clnt *clnt, old = rcu_dereference_protected(clnt->cl_xprt, lockdep_is_held(&clnt->cl_lock)); - if (!xprt_bound(xprt)) - clnt->cl_autobind = 1; - clnt->cl_timeout = timeout; rcu_assign_pointer(clnt->cl_xprt, xprt); spin_unlock(&clnt->cl_lock); diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 82afb56695f8..1ec20163a0b7 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c @@ -797,9 +797,10 @@ static void rpcb_getport_done(struct rpc_task *child, void *data) } trace_rpcb_setport(child, map->r_status, map->r_port); - xprt->ops->set_port(xprt, map->r_port); - if (map->r_port) + if (map->r_port) { + xprt->ops->set_port(xprt, map->r_port); xprt_set_bound(xprt); + } } /* diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index cef623ea1506..73bc39281ef5 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -276,6 +276,8 @@ EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) { + if (unlikely(current->flags & PF_EXITING)) + return -EINTR; schedule(); if (signal_pending_state(mode, current)) return -ERESTARTSYS; @@ -864,8 +866,6 @@ void rpc_signal_task(struct rpc_task *task) if (!rpc_task_set_rpc_status(task, -ERESTARTSYS)) return; trace_rpc_task_signalled(task, task->tk_action); - set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate); - smp_mb__after_atomic(); queue = READ_ONCE(task->tk_waitqueue); if (queue) rpc_wake_up_queued_task(queue, task); diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c index 5bc20e9d09cd..e1d4e426b21f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma.c +++ b/net/sunrpc/xprtrdma/svc_rdma.c @@ -212,24 +212,6 @@ static struct ctl_table svcrdma_parm_table[] = { { }, }; -static struct ctl_table svcrdma_table[] = { - { - .procname = "svc_rdma", - .mode = 0555, - .child = svcrdma_parm_table - }, - { }, -}; - -static struct ctl_table svcrdma_root_table[] = { - { - .procname = "sunrpc", - .mode = 0555, - .child = svcrdma_table - }, - { }, -}; - static void svc_rdma_proc_cleanup(void) { if (!svcrdma_table_header) @@ -252,24 +234,34 @@ static int svc_rdma_proc_init(void) rc = percpu_counter_init(&svcrdma_stat_read, 
0, GFP_KERNEL); if (rc) - goto out_err; + goto err; rc = percpu_counter_init(&svcrdma_stat_recv, 0, GFP_KERNEL); if (rc) - goto out_err; + goto err_read; rc = percpu_counter_init(&svcrdma_stat_sq_starve, 0, GFP_KERNEL); if (rc) - goto out_err; + goto err_recv; rc = percpu_counter_init(&svcrdma_stat_write, 0, GFP_KERNEL); if (rc) - goto out_err; + goto err_sq; + + svcrdma_table_header = register_sysctl("sunrpc/svc_rdma", + svcrdma_parm_table); + if (!svcrdma_table_header) + goto err_write; - svcrdma_table_header = register_sysctl_table(svcrdma_root_table); return 0; -out_err: +err_write: + rc = -ENOMEM; + percpu_counter_destroy(&svcrdma_stat_write); +err_sq: percpu_counter_destroy(&svcrdma_stat_sq_starve); +err_recv: percpu_counter_destroy(&svcrdma_stat_recv); +err_read: percpu_counter_destroy(&svcrdma_stat_read); +err: return rc; } diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index b2dd01e5274e..186c9c12432b 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -482,7 +482,13 @@ static bool xdr_check_write_chunk(struct svc_rdma_recv_ctxt *rctxt) if (xdr_stream_decode_u32(&rctxt->rc_stream, &segcount)) return false; - /* A bogus segcount causes this buffer overflow check to fail. */ + /* Before trusting the segcount value enough to use it in + * a computation, perform a simple range check. This is an + * arbitrary but sensible limit (ie, not architectural). + */ + if (unlikely(segcount > RPCSVC_MAXPAGES)) + return false; + p = xdr_inline_decode(&rctxt->rc_stream, segcount * rpcrdma_segment_maxsz * sizeof(*p)); return p != NULL; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 190dae11f634..b9dc8e197dde 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1130,6 +1130,7 @@ static void xs_sock_reset_state_flags(struct rpc_xprt *xprt) clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state); clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state); clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state); + clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state); } static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr) diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index 2e14d4c37e2d..9aec24490e8f 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -381,7 +381,7 @@ bool switchdev_port_obj_act_is_deferred(struct net_device *dev, EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred); static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain); -static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain); +static RAW_NOTIFIER_HEAD(switchdev_blocking_notif_chain); /** * register_switchdev_notifier - Register notifier @@ -427,17 +427,27 @@ EXPORT_SYMBOL_GPL(call_switchdev_notifiers); int register_switchdev_blocking_notifier(struct notifier_block *nb) { - struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; + struct raw_notifier_head *chain = &switchdev_blocking_notif_chain; + int err; - return blocking_notifier_chain_register(chain, nb); + rtnl_lock(); + err = raw_notifier_chain_register(chain, nb); + rtnl_unlock(); + + return err; } EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier); int unregister_switchdev_blocking_notifier(struct notifier_block *nb) { - struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain; + struct raw_notifier_head *chain = &switchdev_blocking_notif_chain; + int err; - return blocking_notifier_chain_unregister(chain, nb); + rtnl_lock(); + err = 
raw_notifier_chain_unregister(chain, nb); + rtnl_unlock(); + + return err; } EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier); @@ -445,10 +455,11 @@ int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev, struct switchdev_notifier_info *info, struct netlink_ext_ack *extack) { + ASSERT_RTNL(); info->dev = dev; info->extack = extack; - return blocking_notifier_call_chain(&switchdev_blocking_notif_chain, - val, info); + return raw_notifier_call_chain(&switchdev_blocking_notif_chain, + val, info); } EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers); diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index 65f59739a041..a9c02fac039b 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -817,12 +817,16 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb, goto exit; } + /* Get net to avoid freed tipc_crypto when delete namespace */ + get_net(aead->crypto->net); + /* Now, do encrypt */ rc = crypto_aead_encrypt(req); if (rc == -EINPROGRESS || rc == -EBUSY) return rc; tipc_bearer_put(b); + put_net(aead->crypto->net); exit: kfree(ctx); @@ -860,6 +864,7 @@ static void tipc_aead_encrypt_done(struct crypto_async_request *base, int err) kfree(tx_ctx); tipc_bearer_put(b); tipc_aead_put(aead); + put_net(net); } /** @@ -2293,8 +2298,8 @@ static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr) keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME))); /* Verify the supplied size values */ - if (unlikely(size != keylen + sizeof(struct tipc_aead_key) || - keylen > TIPC_AEAD_KEY_SIZE_MAX)) { + if (unlikely(keylen > TIPC_AEAD_KEY_SIZE_MAX || + size != keylen + sizeof(struct tipc_aead_key))) { pr_debug("%s: invalid MSG_CRYPTO key size\n", rx->name); goto exit; } diff --git a/net/tipc/diag.c b/net/tipc/diag.c index 73137f4aeb68..11da9d2ebbf6 100644 --- a/net/tipc/diag.c +++ b/net/tipc/diag.c @@ -95,6 +95,7 @@ static int tipc_sock_diag_handler_dump(struct sk_buff *skb, } static const struct sock_diag_handler tipc_sock_diag_handler = { + .owner = THIS_MODULE, .family = AF_TIPC, .dump = tipc_sock_diag_handler_dump, }; diff --git a/net/tipc/link.c b/net/tipc/link.c index 8715c9b05f90..d6a8f0aa531b 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1068,6 +1068,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) { if (imp == TIPC_SYSTEM_IMPORTANCE) { pr_warn("%s<%s>, link overflow", link_rst_msg, l->name); + __skb_queue_purge(list); return -ENOBUFS; } rc = link_schedule_user(l, hdr); diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index 9618e4429f0f..23efd35adaa3 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c @@ -716,7 +716,8 @@ void tipc_mon_reinit_self(struct net *net) if (!mon) continue; write_lock_bh(&mon->lock); - mon->self->addr = tipc_own_addr(net); + if (mon->self) + mon->self->addr = tipc_own_addr(net); write_unlock_bh(&mon->lock); } } diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 73e461dc12d7..f5bd75d931c1 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -811,6 +811,7 @@ static void cleanup_bearer(struct work_struct *work) { struct udp_bearer *ub = container_of(work, struct udp_bearer, work); struct udp_replicast *rcast, *tmp; + struct tipc_net *tn; list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) { dst_cache_destroy(&rcast->dst_cache); @@ -818,10 +819,14 @@ static void cleanup_bearer(struct work_struct *work) kfree_rcu(rcast, rcu); } - 
atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count); + tn = tipc_net(sock_net(ub->ubsock->sk)); + dst_cache_destroy(&ub->rcast.dst_cache); udp_tunnel_sock_release(ub->ubsock); + + /* Note: could use a call_rcu() to avoid another synchronize_net() */ synchronize_net(); + atomic_dec(&tn->wq_count); kfree(ub); } diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 75cd20c0e3fd..14d01558311d 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -900,6 +900,11 @@ static int tls_setsockopt(struct sock *sk, int level, int optname, return do_tls_setsockopt(sk, optname, optval, optlen); } +static int tls_disconnect(struct sock *sk, int flags) +{ + return -EOPNOTSUPP; +} + struct tls_context *tls_ctx_create(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -995,6 +1000,7 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], prot[TLS_BASE][TLS_BASE] = *base; prot[TLS_BASE][TLS_BASE].setsockopt = tls_setsockopt; prot[TLS_BASE][TLS_BASE].getsockopt = tls_getsockopt; + prot[TLS_BASE][TLS_BASE].disconnect = tls_disconnect; prot[TLS_BASE][TLS_BASE].close = tls_sk_proto_close; prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE]; diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index f37f4a0fcd3c..44d7f1aef9f1 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -396,7 +396,6 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort) return 0; shinfo = skb_shinfo(strp->anchor); - shinfo->frag_list = NULL; /* If we don't know the length go max plus page for cipher overhead */ need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE; @@ -412,6 +411,8 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort) page, 0, 0); } + shinfo->frag_list = NULL; + strp->copy_mode = 1; strp->stm.offset = 0; diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 348abadbc2d8..5310441240e7 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c @@ -452,7 +452,7 @@ int tls_tx_records(struct sock *sk, int flags) tx_err: if (rc < 0 && rc != -EAGAIN) - tls_err_abort(sk, -EBADMSG); + tls_err_abort(sk, rc); return rc; } diff --git a/net/unix/Kconfig b/net/unix/Kconfig index b7f811216820..8b5d04210d7c 100644 --- a/net/unix/Kconfig +++ b/net/unix/Kconfig @@ -4,7 +4,7 @@ # config UNIX - tristate "Unix domain sockets" + bool "Unix domain sockets" help If you say Y here, you will include support for Unix domain sockets; sockets are the standard Unix mechanism for establishing and @@ -14,17 +14,8 @@ config UNIX an embedded system or something similar, you therefore definitely want to say Y here. - To compile this driver as a module, choose M here: the module will be - called unix. Note that several important services won't work - correctly if you say M here and then neglect to load the module. - Say Y unless you know what you are doing. 
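The remainder of the series reworks AF_UNIX fd passing: the UNIX_SCM helpers below are folded into af_unix.c, and net/unix/garbage.c is rewritten around strongly connected components of the in-flight fd graph. The kind of cycle that collector has to reclaim can be produced from user space as in the following self-contained sketch (illustrative only, not part of the patch): one end of a socketpair is sent over the pair via SCM_RIGHTS and then both fds are closed, leaving the only remaining reference inside an in-flight skb.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>

static int send_fd(int via, int fd_to_send)
{
	char data = 'x';
	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
	union {
		char buf[CMSG_SPACE(sizeof(int))];
		struct cmsghdr align;
	} u;
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = u.buf,
		.msg_controllen = sizeof(u.buf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_send, sizeof(int));

	return sendmsg(via, &msg, 0) == 1 ? 0 : -1;
}

int main(void)
{
	int sk[2];

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sk)) {
		perror("socketpair");
		return 1;
	}

	/* The message lands on sk[1]'s receive queue and carries a
	 * duplicated file reference to sk[1] itself.
	 */
	if (send_fd(sk[0], sk[1]))
		perror("sendmsg");

	/* After these closes, that in-flight reference is the only one
	 * left: a self-referential SCC with no external fd, i.e. exactly
	 * what the garbage collector must detect and purge.
	 */
	close(sk[0]);
	close(sk[1]);
	return 0;
}
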
-config UNIX_SCM - bool - depends on UNIX - default y - config AF_UNIX_OOB bool depends on UNIX diff --git a/net/unix/Makefile b/net/unix/Makefile index 20491825b4d0..4ddd125c4642 100644 --- a/net/unix/Makefile +++ b/net/unix/Makefile @@ -11,5 +11,3 @@ unix-$(CONFIG_BPF_SYSCALL) += unix_bpf.o obj-$(CONFIG_UNIX_DIAG) += unix_diag.o unix_diag-y := diag.o - -obj-$(CONFIG_UNIX_SCM) += scm.o diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 5ce60087086c..79b783a70c87 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -116,8 +116,6 @@ #include #include -#include "scm.h" - static atomic_long_t unix_nr_socks; static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2]; static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2]; @@ -956,11 +954,11 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen); sk->sk_destruct = unix_sock_destructor; u = unix_sk(sk); - u->inflight = 0; + u->listener = NULL; + u->vertex = NULL; u->path.dentry = NULL; u->path.mnt = NULL; spin_lock_init(&u->lock); - INIT_LIST_HEAD(&u->link); mutex_init(&u->iolock); /* single task reading lock */ mutex_init(&u->bindlock); /* single task binding lock */ init_waitqueue_head(&u->peer_wait); @@ -1559,6 +1557,7 @@ restart: newsk->sk_type = sk->sk_type; init_peercred(newsk); newu = unix_sk(newsk); + newu->listener = other; RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); otheru = unix_sk(other); @@ -1652,8 +1651,8 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags, bool kern) { struct sock *sk = sock->sk; - struct sock *tsk; struct sk_buff *skb; + struct sock *tsk; int err; err = -EOPNOTSUPP; @@ -1683,6 +1682,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags, /* attach accepted sock to socket */ unix_state_lock(tsk); + unix_update_edges(unix_sk(tsk)); newsock->state = SS_CONNECTED; unix_sock_inherit_flags(sock, newsock); sock_graft(tsk, newsock); @@ -1726,51 +1726,65 @@ out: return err; } +/* The "user->unix_inflight" variable is protected by the garbage + * collection lock, and we just read it locklessly here. If you go + * over the limit, there might be a tiny race in actually noticing + * it across threads. Tough. + */ +static inline bool too_many_unix_fds(struct task_struct *p) +{ + struct user_struct *user = current_user(); + + if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE))) + return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); + return false; +} + +static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) +{ + if (too_many_unix_fds(current)) + return -ETOOMANYREFS; + + /* Need to duplicate file references for the sake of garbage + * collection. Otherwise a socket in the fps might become a + * candidate for GC while the skb is not yet queued. + */ + UNIXCB(skb).fp = scm_fp_dup(scm->fp); + if (!UNIXCB(skb).fp) + return -ENOMEM; + + if (unix_prepare_fpl(UNIXCB(skb).fp)) + return -ENOMEM; + + return 0; +} + +static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) +{ + scm->fp = UNIXCB(skb).fp; + UNIXCB(skb).fp = NULL; + + unix_destroy_fpl(scm->fp); +} + static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb) { scm->fp = scm_fp_dup(UNIXCB(skb).fp); +} - /* - * Garbage collection of unix sockets starts by selecting a set of - * candidate sockets which have reference only from being in flight - * (total_refs == inflight_refs). 
This condition is checked once during - * the candidate collection phase, and candidates are marked as such, so - * that non-candidates can later be ignored. While inflight_refs is - * protected by unix_gc_lock, total_refs (file count) is not, hence this - * is an instantaneous decision. - * - * Once a candidate, however, the socket must not be reinstalled into a - * file descriptor while the garbage collection is in progress. - * - * If the above conditions are met, then the directed graph of - * candidates (*) does not change while unix_gc_lock is held. - * - * Any operations that changes the file count through file descriptors - * (dup, close, sendmsg) does not change the graph since candidates are - * not installed in fds. - * - * Dequeing a candidate via recvmsg would install it into an fd, but - * that takes unix_gc_lock to decrement the inflight count, so it's - * serialized with garbage collection. - * - * MSG_PEEK is special in that it does not change the inflight count, - * yet does install the socket into an fd. The following lock/unlock - * pair is to ensure serialization with garbage collection. It must be - * done between incrementing the file count and installing the file into - * an fd. - * - * If garbage collection starts after the barrier provided by the - * lock/unlock, then it will see the elevated refcount and not mark this - * as a candidate. If a garbage collection is already in progress - * before the file count was incremented, then the lock/unlock pair will - * ensure that garbage collection is finished before progressing to - * installing the fd. - * - * (*) A -> B where B is on the queue of A or B is on the queue of C - * which is on the queue of listening socket A. - */ - spin_lock(&unix_gc_lock); - spin_unlock(&unix_gc_lock); +static void unix_destruct_scm(struct sk_buff *skb) +{ + struct scm_cookie scm; + + memset(&scm, 0, sizeof(scm)); + scm.pid = UNIXCB(skb).pid; + if (UNIXCB(skb).fp) + unix_detach_fds(&scm, skb); + + /* Alas, it calls VFS */ + /* So fscking what? 
fput() had been SMP-safe since the last Summer */ + scm_destroy(&scm); + sock_wfree(skb); } static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) @@ -1845,8 +1859,10 @@ static void scm_stat_add(struct sock *sk, struct sk_buff *skb) struct scm_fp_list *fp = UNIXCB(skb).fp; struct unix_sock *u = unix_sk(sk); - if (unlikely(fp && fp->count)) + if (unlikely(fp && fp->count)) { atomic_add(fp->count, &u->scm_stat.nr_fds); + unix_add_edges(fp, u); + } } static void scm_stat_del(struct sock *sk, struct sk_buff *skb) @@ -1854,8 +1870,10 @@ static void scm_stat_del(struct sock *sk, struct sk_buff *skb) struct scm_fp_list *fp = UNIXCB(skb).fp; struct unix_sock *u = unix_sk(sk); - if (unlikely(fp && fp->count)) + if (unlikely(fp && fp->count)) { atomic_sub(fp->count, &u->scm_stat.nr_fds); + unix_del_edges(fp); + } } /* @@ -1875,11 +1893,12 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, long timeo; int err; - wait_for_unix_gc(); err = scm_send(sock, msg, &scm, false); if (err < 0) return err; + wait_for_unix_gc(scm.fp); + err = -EOPNOTSUPP; if (msg->msg_flags&MSG_OOB) goto out; @@ -2145,11 +2164,12 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, bool fds_sent = false; int data_len; - wait_for_unix_gc(); err = scm_send(sock, msg, &scm, false); if (err < 0) return err; + wait_for_unix_gc(scm.fp); + err = -EOPNOTSUPP; if (msg->msg_flags & MSG_OOB) { #if IS_ENABLED(CONFIG_AF_UNIX_OOB) diff --git a/net/unix/diag.c b/net/unix/diag.c index 1de7500b41b6..a6bd861314df 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -322,6 +322,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) } static const struct sock_diag_handler unix_diag_handler = { + .owner = THIS_MODULE, .family = AF_UNIX, .dump = unix_diag_handler_dump, }; diff --git a/net/unix/garbage.c b/net/unix/garbage.c index d2fc795394a5..23efb78fe9ef 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -81,278 +81,551 @@ #include #include -#include "scm.h" - -/* Internal data structures and random procedures: */ - -static LIST_HEAD(gc_candidates); -static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); - -static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), - struct sk_buff_head *hitlist) +struct unix_sock *unix_get_socket(struct file *filp) { - struct sk_buff *skb; - struct sk_buff *next; + struct inode *inode = file_inode(filp); - spin_lock(&x->sk_receive_queue.lock); - skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { - /* Do we have file descriptors ? */ - if (UNIXCB(skb).fp) { - bool hit = false; - /* Process the descriptors of this socket */ - int nfd = UNIXCB(skb).fp->count; - struct file **fp = UNIXCB(skb).fp->fp; + /* Socket ? */ + if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { + struct socket *sock = SOCKET_I(inode); + const struct proto_ops *ops; + struct sock *sk = sock->sk; - while (nfd--) { - /* Get the socket the fd matches if it indeed does so */ - struct sock *sk = unix_get_socket(*fp++); + ops = READ_ONCE(sock->ops); - if (sk) { - struct unix_sock *u = unix_sk(sk); - - /* Ignore non-candidates, they could - * have been added to the queues after - * starting the garbage collection - */ - if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { - hit = true; - - func(u); - } - } - } - if (hit && hitlist != NULL) { - __skb_unlink(skb, &x->sk_receive_queue); - __skb_queue_tail(hitlist, skb); - } - } + /* PF_UNIX ? 
*/ + if (sk && ops && ops->family == PF_UNIX) + return unix_sk(sk); } - spin_unlock(&x->sk_receive_queue.lock); + + return NULL; } -static void scan_children(struct sock *x, void (*func)(struct unix_sock *), - struct sk_buff_head *hitlist) +static struct unix_vertex *unix_edge_successor(struct unix_edge *edge) { - if (x->sk_state != TCP_LISTEN) { - scan_inflight(x, func, hitlist); - } else { - struct sk_buff *skb; - struct sk_buff *next; - struct unix_sock *u; - LIST_HEAD(embryos); - - /* For a listening socket collect the queued embryos - * and perform a scan on them as well. - */ - spin_lock(&x->sk_receive_queue.lock); - skb_queue_walk_safe(&x->sk_receive_queue, skb, next) { - u = unix_sk(skb->sk); - - /* An embryo cannot be in-flight, so it's safe - * to use the list link. - */ - BUG_ON(!list_empty(&u->link)); - list_add_tail(&u->link, &embryos); - } - spin_unlock(&x->sk_receive_queue.lock); - - while (!list_empty(&embryos)) { - u = list_entry(embryos.next, struct unix_sock, link); - scan_inflight(&u->sk, func, hitlist); - list_del_init(&u->link); - } - } -} - -static void dec_inflight(struct unix_sock *usk) -{ - usk->inflight--; -} - -static void inc_inflight(struct unix_sock *usk) -{ - usk->inflight++; -} - -static void inc_inflight_move_tail(struct unix_sock *u) -{ - u->inflight++; - - /* If this still might be part of a cycle, move it to the end - * of the list, so that it's checked even if it was already - * passed over + /* If an embryo socket has a fd, + * the listener indirectly holds the fd's refcnt. */ - if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags)) - list_move_tail(&u->link, &gc_candidates); + if (edge->successor->listener) + return unix_sk(edge->successor->listener)->vertex; + + return edge->successor->vertex; +} + +static bool unix_graph_maybe_cyclic; +static bool unix_graph_grouped; + +static void unix_update_graph(struct unix_vertex *vertex) +{ + /* If the receiver socket is not inflight, no cyclic + * reference could be formed. 
+ */ + if (!vertex) + return; + + unix_graph_maybe_cyclic = true; + unix_graph_grouped = false; +} + +static LIST_HEAD(unix_unvisited_vertices); + +enum unix_vertex_index { + UNIX_VERTEX_INDEX_MARK1, + UNIX_VERTEX_INDEX_MARK2, + UNIX_VERTEX_INDEX_START, +}; + +static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1; + +static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge) +{ + struct unix_vertex *vertex = edge->predecessor->vertex; + + if (!vertex) { + vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry); + vertex->index = unix_vertex_unvisited_index; + vertex->out_degree = 0; + INIT_LIST_HEAD(&vertex->edges); + INIT_LIST_HEAD(&vertex->scc_entry); + + list_move_tail(&vertex->entry, &unix_unvisited_vertices); + edge->predecessor->vertex = vertex; + } + + vertex->out_degree++; + list_add_tail(&edge->vertex_entry, &vertex->edges); + + unix_update_graph(unix_edge_successor(edge)); +} + +static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge) +{ + struct unix_vertex *vertex = edge->predecessor->vertex; + + if (!fpl->dead) + unix_update_graph(unix_edge_successor(edge)); + + list_del(&edge->vertex_entry); + vertex->out_degree--; + + if (!vertex->out_degree) { + edge->predecessor->vertex = NULL; + list_move_tail(&vertex->entry, &fpl->vertices); + } +} + +static void unix_free_vertices(struct scm_fp_list *fpl) +{ + struct unix_vertex *vertex, *next_vertex; + + list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) { + list_del(&vertex->entry); + kfree(vertex); + } +} + +static DEFINE_SPINLOCK(unix_gc_lock); +unsigned int unix_tot_inflight; + +void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver) +{ + int i = 0, j = 0; + + spin_lock(&unix_gc_lock); + + if (!fpl->count_unix) + goto out; + + do { + struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]); + struct unix_edge *edge; + + if (!inflight) + continue; + + edge = fpl->edges + i++; + edge->predecessor = inflight; + edge->successor = receiver; + + unix_add_edge(fpl, edge); + } while (i < fpl->count_unix); + + receiver->scm_stat.nr_unix_fds += fpl->count_unix; + WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix); +out: + WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count); + + spin_unlock(&unix_gc_lock); + + fpl->inflight = true; + + unix_free_vertices(fpl); +} + +void unix_del_edges(struct scm_fp_list *fpl) +{ + struct unix_sock *receiver; + int i = 0; + + spin_lock(&unix_gc_lock); + + if (!fpl->count_unix) + goto out; + + do { + struct unix_edge *edge = fpl->edges + i++; + + unix_del_edge(fpl, edge); + } while (i < fpl->count_unix); + + if (!fpl->dead) { + receiver = fpl->edges[0].successor; + receiver->scm_stat.nr_unix_fds -= fpl->count_unix; + } + WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix); +out: + WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count); + + spin_unlock(&unix_gc_lock); + + fpl->inflight = false; +} + +void unix_update_edges(struct unix_sock *receiver) +{ + /* nr_unix_fds is only updated under unix_state_lock(). + * If it's 0 here, the embryo socket is not part of the + * inflight graph, and GC will not see it, so no lock needed. 
+ */ + if (!receiver->scm_stat.nr_unix_fds) { + receiver->listener = NULL; + } else { + spin_lock(&unix_gc_lock); + unix_update_graph(unix_sk(receiver->listener)->vertex); + receiver->listener = NULL; + spin_unlock(&unix_gc_lock); + } +} + +int unix_prepare_fpl(struct scm_fp_list *fpl) +{ + struct unix_vertex *vertex; + int i; + + if (!fpl->count_unix) + return 0; + + for (i = 0; i < fpl->count_unix; i++) { + vertex = kmalloc(sizeof(*vertex), GFP_KERNEL); + if (!vertex) + goto err; + + list_add(&vertex->entry, &fpl->vertices); + } + + fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges), + GFP_KERNEL_ACCOUNT); + if (!fpl->edges) + goto err; + + return 0; + +err: + unix_free_vertices(fpl); + return -ENOMEM; +} + +void unix_destroy_fpl(struct scm_fp_list *fpl) +{ + if (fpl->inflight) + unix_del_edges(fpl); + + kvfree(fpl->edges); + unix_free_vertices(fpl); +} + +static bool unix_vertex_dead(struct unix_vertex *vertex) +{ + struct unix_edge *edge; + struct unix_sock *u; + long total_ref; + + list_for_each_entry(edge, &vertex->edges, vertex_entry) { + struct unix_vertex *next_vertex = unix_edge_successor(edge); + + /* The vertex's fd can be received by a non-inflight socket. */ + if (!next_vertex) + return false; + + /* The vertex's fd can be received by an inflight socket in + * another SCC. + */ + if (next_vertex->scc_index != vertex->scc_index) + return false; + } + + /* No receiver exists out of the same SCC. */ + + edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry); + u = edge->predecessor; + total_ref = file_count(u->sk.sk_socket->file); + + /* If not close()d, total_ref > out_degree. */ + if (total_ref != vertex->out_degree) + return false; + + return true; +} + +enum unix_recv_queue_lock_class { + U_RECVQ_LOCK_NORMAL, + U_RECVQ_LOCK_EMBRYO, +}; + +static void unix_collect_queue(struct unix_sock *u, struct sk_buff_head *hitlist) +{ + skb_queue_splice_init(&u->sk.sk_receive_queue, hitlist); + +#if IS_ENABLED(CONFIG_AF_UNIX_OOB) + if (u->oob_skb) { + WARN_ON_ONCE(skb_unref(u->oob_skb)); + u->oob_skb = NULL; + } +#endif +} + +static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist) +{ + struct unix_vertex *vertex; + + list_for_each_entry_reverse(vertex, scc, scc_entry) { + struct sk_buff_head *queue; + struct unix_edge *edge; + struct unix_sock *u; + + edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry); + u = edge->predecessor; + queue = &u->sk.sk_receive_queue; + + spin_lock(&queue->lock); + + if (u->sk.sk_state == TCP_LISTEN) { + struct sk_buff *skb; + + skb_queue_walk(queue, skb) { + struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue; + + /* listener -> embryo order, the inversion never happens. */ + spin_lock_nested(&embryo_queue->lock, U_RECVQ_LOCK_EMBRYO); + unix_collect_queue(unix_sk(skb->sk), hitlist); + spin_unlock(&embryo_queue->lock); + } + } else { + unix_collect_queue(u, hitlist); + } + + spin_unlock(&queue->lock); + } +} + +static bool unix_scc_cyclic(struct list_head *scc) +{ + struct unix_vertex *vertex; + struct unix_edge *edge; + + /* SCC containing multiple vertices ? */ + if (!list_is_singular(scc)) + return true; + + vertex = list_first_entry(scc, typeof(*vertex), scc_entry); + + /* Self-reference or a embryo-listener circle ? 
*/ + list_for_each_entry(edge, &vertex->edges, vertex_entry) { + if (unix_edge_successor(edge) == vertex) + return true; + } + + return false; +} + +static LIST_HEAD(unix_visited_vertices); +static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2; + +static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index, + struct sk_buff_head *hitlist) +{ + LIST_HEAD(vertex_stack); + struct unix_edge *edge; + LIST_HEAD(edge_stack); + +next_vertex: + /* Push vertex to vertex_stack and mark it as on-stack + * (index >= UNIX_VERTEX_INDEX_START). + * The vertex will be popped when finalising SCC later. + */ + list_add(&vertex->scc_entry, &vertex_stack); + + vertex->index = *last_index; + vertex->scc_index = *last_index; + (*last_index)++; + + /* Explore neighbour vertices (receivers of the current vertex's fd). */ + list_for_each_entry(edge, &vertex->edges, vertex_entry) { + struct unix_vertex *next_vertex = unix_edge_successor(edge); + + if (!next_vertex) + continue; + + if (next_vertex->index == unix_vertex_unvisited_index) { + /* Iterative deepening depth first search + * + * 1. Push a forward edge to edge_stack and set + * the successor to vertex for the next iteration. + */ + list_add(&edge->stack_entry, &edge_stack); + + vertex = next_vertex; + goto next_vertex; + + /* 2. Pop the edge directed to the current vertex + * and restore the ancestor for backtracking. + */ +prev_vertex: + edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry); + list_del_init(&edge->stack_entry); + + next_vertex = vertex; + vertex = edge->predecessor->vertex; + + /* If the successor has a smaller scc_index, two vertices + * are in the same SCC, so propagate the smaller scc_index + * to skip SCC finalisation. + */ + vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index); + } else if (next_vertex->index != unix_vertex_grouped_index) { + /* Loop detected by a back/cross edge. + * + * The successor is on vertex_stack, so two vertices are in + * the same SCC. If the successor has a smaller *scc_index*, + * propagate it to skip SCC finalisation. + */ + vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index); + } else { + /* The successor was already grouped as another SCC */ + } + } + + if (vertex->index == vertex->scc_index) { + struct unix_vertex *v; + struct list_head scc; + bool scc_dead = true; + + /* SCC finalised. + * + * If the scc_index was not updated, all the vertices above on + * vertex_stack are in the same SCC. Group them using scc_entry. + */ + __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry); + + list_for_each_entry_reverse(v, &scc, scc_entry) { + /* Don't restart DFS from this vertex in unix_walk_scc(). */ + list_move_tail(&v->entry, &unix_visited_vertices); + + /* Mark vertex as off-stack. */ + v->index = unix_vertex_grouped_index; + + if (scc_dead) + scc_dead = unix_vertex_dead(v); + } + + if (scc_dead) + unix_collect_skb(&scc, hitlist); + else if (!unix_graph_maybe_cyclic) + unix_graph_maybe_cyclic = unix_scc_cyclic(&scc); + + list_del(&scc); + } + + /* Need backtracking ? */ + if (!list_empty(&edge_stack)) + goto prev_vertex; +} + +static void unix_walk_scc(struct sk_buff_head *hitlist) +{ + unsigned long last_index = UNIX_VERTEX_INDEX_START; + + unix_graph_maybe_cyclic = false; + + /* Visit every vertex exactly once. + * __unix_walk_scc() moves visited vertices to unix_visited_vertices. 
+ */ + while (!list_empty(&unix_unvisited_vertices)) { + struct unix_vertex *vertex; + + vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry); + __unix_walk_scc(vertex, &last_index, hitlist); + } + + list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices); + swap(unix_vertex_unvisited_index, unix_vertex_grouped_index); + + unix_graph_grouped = true; +} + +static void unix_walk_scc_fast(struct sk_buff_head *hitlist) +{ + unix_graph_maybe_cyclic = false; + + while (!list_empty(&unix_unvisited_vertices)) { + struct unix_vertex *vertex; + struct list_head scc; + bool scc_dead = true; + + vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry); + list_add(&scc, &vertex->scc_entry); + + list_for_each_entry_reverse(vertex, &scc, scc_entry) { + list_move_tail(&vertex->entry, &unix_visited_vertices); + + if (scc_dead) + scc_dead = unix_vertex_dead(vertex); + } + + if (scc_dead) + unix_collect_skb(&scc, hitlist); + else if (!unix_graph_maybe_cyclic) + unix_graph_maybe_cyclic = unix_scc_cyclic(&scc); + + list_del(&scc); + } + + list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices); } static bool gc_in_progress; -#define UNIX_INFLIGHT_TRIGGER_GC 16000 -void wait_for_unix_gc(void) +static void __unix_gc(struct work_struct *work) +{ + struct sk_buff_head hitlist; + struct sk_buff *skb; + + spin_lock(&unix_gc_lock); + + if (!unix_graph_maybe_cyclic) { + spin_unlock(&unix_gc_lock); + goto skip_gc; + } + + __skb_queue_head_init(&hitlist); + + if (unix_graph_grouped) + unix_walk_scc_fast(&hitlist); + else + unix_walk_scc(&hitlist); + + spin_unlock(&unix_gc_lock); + + skb_queue_walk(&hitlist, skb) { + if (UNIXCB(skb).fp) + UNIXCB(skb).fp->dead = true; + } + + __skb_queue_purge(&hitlist); +skip_gc: + WRITE_ONCE(gc_in_progress, false); +} + +static DECLARE_WORK(unix_gc_work, __unix_gc); + +void unix_gc(void) +{ + WRITE_ONCE(gc_in_progress, true); + queue_work(system_unbound_wq, &unix_gc_work); +} + +#define UNIX_INFLIGHT_TRIGGER_GC 16000 +#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8) + +void wait_for_unix_gc(struct scm_fp_list *fpl) { /* If number of inflight sockets is insane, * force a garbage collect right now. + * * Paired with the WRITE_ONCE() in unix_inflight(), - * unix_notinflight() and gc_in_progress(). + * unix_notinflight(), and __unix_gc(). */ if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC && !READ_ONCE(gc_in_progress)) unix_gc(); - wait_event(unix_gc_wait, !READ_ONCE(gc_in_progress)); -} - -/* The external entry point: unix_gc() */ -void unix_gc(void) -{ - struct sk_buff *next_skb, *skb; - struct unix_sock *u; - struct unix_sock *next; - struct sk_buff_head hitlist; - struct list_head cursor; - LIST_HEAD(not_cycle_list); - - spin_lock(&unix_gc_lock); - - /* Avoid a recursive GC. */ - if (gc_in_progress) - goto out; - - /* Paired with READ_ONCE() in wait_for_unix_gc(). */ - WRITE_ONCE(gc_in_progress, true); - - /* First, select candidates for garbage collection. Only - * in-flight sockets are considered, and from those only ones - * which don't have any external reference. - * - * Holding unix_gc_lock will protect these candidates from - * being detached, and hence from gaining an external - * reference. Since there are no possible receivers, all - * buffers currently on the candidates' queues stay there - * during the garbage collection. - * - * We also know that no new candidate can be added onto the - * receive queues. 
Other, non candidate sockets _can_ be - * added to queue, so we must make sure only to touch - * candidates. - * - * Embryos, though never candidates themselves, affect which - * candidates are reachable by the garbage collector. Before - * being added to a listener's queue, an embryo may already - * receive data carrying SCM_RIGHTS, potentially making the - * passed socket a candidate that is not yet reachable by the - * collector. It becomes reachable once the embryo is - * enqueued. Therefore, we must ensure that no SCM-laden - * embryo appears in a (candidate) listener's queue between - * consecutive scan_children() calls. - */ - list_for_each_entry_safe(u, next, &gc_inflight_list, link) { - struct sock *sk = &u->sk; - long total_refs; - - total_refs = file_count(sk->sk_socket->file); - - BUG_ON(!u->inflight); - BUG_ON(total_refs < u->inflight); - if (total_refs == u->inflight) { - list_move_tail(&u->link, &gc_candidates); - __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags); - __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); - - if (sk->sk_state == TCP_LISTEN) { - unix_state_lock_nested(sk, U_LOCK_GC_LISTENER); - unix_state_unlock(sk); - } - } - } - - /* Now remove all internal in-flight reference to children of - * the candidates. - */ - list_for_each_entry(u, &gc_candidates, link) - scan_children(&u->sk, dec_inflight, NULL); - - /* Restore the references for children of all candidates, - * which have remaining references. Do this recursively, so - * only those remain, which form cyclic references. - * - * Use a "cursor" link, to make the list traversal safe, even - * though elements might be moved about. - */ - list_add(&cursor, &gc_candidates); - while (cursor.next != &gc_candidates) { - u = list_entry(cursor.next, struct unix_sock, link); - - /* Move cursor to after the current position. */ - list_move(&cursor, &u->link); - - if (u->inflight) { - list_move_tail(&u->link, ¬_cycle_list); - __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); - scan_children(&u->sk, inc_inflight_move_tail, NULL); - } - } - list_del(&cursor); - - /* Now gc_candidates contains only garbage. Restore original - * inflight counters for these as well, and remove the skbuffs - * which are creating the cycle(s). - */ - skb_queue_head_init(&hitlist); - list_for_each_entry(u, &gc_candidates, link) { - scan_children(&u->sk, inc_inflight, &hitlist); - -#if IS_ENABLED(CONFIG_AF_UNIX_OOB) - if (u->oob_skb) { - kfree_skb(u->oob_skb); - u->oob_skb = NULL; - } -#endif - } - - /* not_cycle_list contains those sockets which do not make up a - * cycle. Restore these to the inflight list. - */ - while (!list_empty(¬_cycle_list)) { - u = list_entry(not_cycle_list.next, struct unix_sock, link); - __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags); - list_move_tail(&u->link, &gc_inflight_list); - } - - spin_unlock(&unix_gc_lock); - - /* We need io_uring to clean its registered files, ignore all io_uring - * originated skbs. It's fine as io_uring doesn't keep references to - * other io_uring instances and so killing all other files in the cycle - * will put all io_uring references forcing it to go through normal - * release.path eventually putting registered files. - */ - skb_queue_walk_safe(&hitlist, skb, next_skb) { - if (skb->scm_io_uring) { - __skb_unlink(skb, &hitlist); - skb_queue_tail(&skb->sk->sk_receive_queue, skb); - } - } - - /* Here we are. Hitlist is filled. Die. 
*/ - __skb_queue_purge(&hitlist); - - spin_lock(&unix_gc_lock); - - /* There could be io_uring registered files, just push them back to - * the inflight list - */ - list_for_each_entry_safe(u, next, &gc_candidates, link) - list_move_tail(&u->link, &gc_inflight_list); - - /* All candidates should have been detached by now. */ - BUG_ON(!list_empty(&gc_candidates)); - - /* Paired with READ_ONCE() in wait_for_unix_gc(). */ - WRITE_ONCE(gc_in_progress, false); - - wake_up(&unix_gc_wait); - - out: - spin_unlock(&unix_gc_lock); + + /* Penalise users who want to send AF_UNIX sockets + * but whose sockets have not been received yet. + */ + if (!fpl || !fpl->count_unix || + READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER) + return; + + if (READ_ONCE(gc_in_progress)) + flush_work(&unix_gc_work); } diff --git a/net/unix/scm.c b/net/unix/scm.c deleted file mode 100644 index 4eff7da9f6f9..000000000000 --- a/net/unix/scm.c +++ /dev/null @@ -1,154 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "scm.h" - -unsigned int unix_tot_inflight; -EXPORT_SYMBOL(unix_tot_inflight); - -LIST_HEAD(gc_inflight_list); -EXPORT_SYMBOL(gc_inflight_list); - -DEFINE_SPINLOCK(unix_gc_lock); -EXPORT_SYMBOL(unix_gc_lock); - -struct sock *unix_get_socket(struct file *filp) -{ - struct sock *u_sock = NULL; - struct inode *inode = file_inode(filp); - - /* Socket ? */ - if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) { - struct socket *sock = SOCKET_I(inode); - struct sock *s = sock->sk; - - /* PF_UNIX ? */ - if (s && sock->ops && sock->ops->family == PF_UNIX) - u_sock = s; - } - - return u_sock; -} -EXPORT_SYMBOL(unix_get_socket); - -/* Keep the number of times in flight count for the file - * descriptor if it is for an AF_UNIX socket. - */ -void unix_inflight(struct user_struct *user, struct file *fp) -{ - struct sock *s = unix_get_socket(fp); - - spin_lock(&unix_gc_lock); - - if (s) { - struct unix_sock *u = unix_sk(s); - - if (!u->inflight) { - BUG_ON(!list_empty(&u->link)); - list_add_tail(&u->link, &gc_inflight_list); - } else { - BUG_ON(list_empty(&u->link)); - } - u->inflight++; - /* Paired with READ_ONCE() in wait_for_unix_gc() */ - WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1); - } - WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1); - spin_unlock(&unix_gc_lock); -} - -void unix_notinflight(struct user_struct *user, struct file *fp) -{ - struct sock *s = unix_get_socket(fp); - - spin_lock(&unix_gc_lock); - - if (s) { - struct unix_sock *u = unix_sk(s); - - BUG_ON(!u->inflight); - BUG_ON(list_empty(&u->link)); - - u->inflight--; - if (!u->inflight) - list_del_init(&u->link); - /* Paired with READ_ONCE() in wait_for_unix_gc() */ - WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1); - } - WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1); - spin_unlock(&unix_gc_lock); -} - -/* - * The "user->unix_inflight" variable is protected by the garbage - * collection lock, and we just read it locklessly here. If you go - * over the limit, there might be a tiny race in actually noticing - * it across threads. Tough. 
- */ -static inline bool too_many_unix_fds(struct task_struct *p) -{ - struct user_struct *user = current_user(); - - if (unlikely(READ_ONCE(user->unix_inflight) > task_rlimit(p, RLIMIT_NOFILE))) - return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN); - return false; -} - -int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) -{ - int i; - - if (too_many_unix_fds(current)) - return -ETOOMANYREFS; - - /* - * Need to duplicate file references for the sake of garbage - * collection. Otherwise a socket in the fps might become a - * candidate for GC while the skb is not yet queued. - */ - UNIXCB(skb).fp = scm_fp_dup(scm->fp); - if (!UNIXCB(skb).fp) - return -ENOMEM; - - for (i = scm->fp->count - 1; i >= 0; i--) - unix_inflight(scm->fp->user, scm->fp->fp[i]); - return 0; -} -EXPORT_SYMBOL(unix_attach_fds); - -void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) -{ - int i; - - scm->fp = UNIXCB(skb).fp; - UNIXCB(skb).fp = NULL; - - for (i = scm->fp->count-1; i >= 0; i--) - unix_notinflight(scm->fp->user, scm->fp->fp[i]); -} -EXPORT_SYMBOL(unix_detach_fds); - -void unix_destruct_scm(struct sk_buff *skb) -{ - struct scm_cookie scm; - - memset(&scm, 0, sizeof(scm)); - scm.pid = UNIXCB(skb).pid; - if (UNIXCB(skb).fp) - unix_detach_fds(&scm, skb); - - /* Alas, it calls VFS */ - /* So fscking what? fput() had been SMP-safe since the last Summer */ - scm_destroy(&scm); - sock_wfree(skb); -} -EXPORT_SYMBOL(unix_destruct_scm); diff --git a/net/unix/scm.h b/net/unix/scm.h deleted file mode 100644 index 5a255a477f16..000000000000 --- a/net/unix/scm.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef NET_UNIX_SCM_H -#define NET_UNIX_SCM_H - -extern struct list_head gc_inflight_list; -extern spinlock_t unix_gc_lock; - -int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb); -void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb); - -#endif diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 84471745c082..a2271da346d3 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -116,12 +116,14 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr); static void vsock_sk_destruct(struct sock *sk); static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); +static void vsock_close(struct sock *sk, long timeout); /* Protocol family. */ static struct proto vsock_proto = { .name = "AF_VSOCK", .owner = THIS_MODULE, .obj_size = sizeof(struct vsock_sock), + .close = vsock_close, }; /* The default peer timeout indicates how long we will wait for a peer response @@ -331,7 +333,10 @@ EXPORT_SYMBOL_GPL(vsock_find_connected_socket); void vsock_remove_sock(struct vsock_sock *vsk) { - vsock_remove_bound(vsk); + /* Transport reassignment must not remove the binding. */ + if (sock_flag(sk_vsock(vsk), SOCK_DEAD)) + vsock_remove_bound(vsk); + vsock_remove_connected(vsk); } EXPORT_SYMBOL_GPL(vsock_remove_sock); @@ -485,6 +490,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk) */ vsk->transport->release(vsk); vsock_deassign_transport(vsk); + + /* transport's release() and destruct() can touch some socket + * state, since we are reassigning the socket to a new transport + * during vsock_connect(), let's reset these fields to have a + * clean state. 
+ */ + sock_reset_flag(sk, SOCK_DONE); + sk->sk_state = TCP_CLOSE; + vsk->peer_shutdown = 0; } /* We increase the module refcnt to prevent the transport unloading @@ -794,39 +808,44 @@ static bool sock_type_connectible(u16 type) static void __vsock_release(struct sock *sk, int level) { - if (sk) { - struct sock *pending; - struct vsock_sock *vsk; + struct vsock_sock *vsk; + struct sock *pending; - vsk = vsock_sk(sk); - pending = NULL; /* Compiler warning. */ + vsk = vsock_sk(sk); + pending = NULL; /* Compiler warning. */ - /* When "level" is SINGLE_DEPTH_NESTING, use the nested - * version to avoid the warning "possible recursive locking - * detected". When "level" is 0, lock_sock_nested(sk, level) - * is the same as lock_sock(sk). - */ - lock_sock_nested(sk, level); + /* When "level" is SINGLE_DEPTH_NESTING, use the nested + * version to avoid the warning "possible recursive locking + * detected". When "level" is 0, lock_sock_nested(sk, level) + * is the same as lock_sock(sk). + */ + lock_sock_nested(sk, level); - if (vsk->transport) - vsk->transport->release(vsk); - else if (sock_type_connectible(sk->sk_type)) - vsock_remove_sock(vsk); + /* Indicate to vsock_remove_sock() that the socket is being released and + * can be removed from the bound_table. Unlike transport reassignment + * case, where the socket must remain bound despite vsock_remove_sock() + * being called from the transport release() callback. + */ + sock_set_flag(sk, SOCK_DEAD); - sock_orphan(sk); - sk->sk_shutdown = SHUTDOWN_MASK; + if (vsk->transport) + vsk->transport->release(vsk); + else if (sock_type_connectible(sk->sk_type)) + vsock_remove_sock(vsk); - skb_queue_purge(&sk->sk_receive_queue); + sock_orphan(sk); + sk->sk_shutdown = SHUTDOWN_MASK; - /* Clean up any sockets that never were accepted. */ - while ((pending = vsock_dequeue_accept(sk)) != NULL) { - __vsock_release(pending, SINGLE_DEPTH_NESTING); - sock_put(pending); - } + skb_queue_purge(&sk->sk_receive_queue); - release_sock(sk); - sock_put(sk); + /* Clean up any sockets that never were accepted. */ + while ((pending = vsock_dequeue_accept(sk)) != NULL) { + __vsock_release(pending, SINGLE_DEPTH_NESTING); + sock_put(pending); } + + release_sock(sk); + sock_put(sk); } static void vsock_sk_destruct(struct sock *sk) @@ -864,6 +883,9 @@ EXPORT_SYMBOL_GPL(vsock_create_connected); s64 vsock_stream_has_data(struct vsock_sock *vsk) { + if (WARN_ON(!vsk->transport)) + return 0; + return vsk->transport->stream_has_data(vsk); } EXPORT_SYMBOL_GPL(vsock_stream_has_data); @@ -872,6 +894,9 @@ static s64 vsock_connectible_has_data(struct vsock_sock *vsk) { struct sock *sk = sk_vsock(vsk); + if (WARN_ON(!vsk->transport)) + return 0; + if (sk->sk_type == SOCK_SEQPACKET) return vsk->transport->seqpacket_has_data(vsk); else @@ -880,6 +905,9 @@ static s64 vsock_connectible_has_data(struct vsock_sock *vsk) s64 vsock_stream_has_space(struct vsock_sock *vsk) { + if (WARN_ON(!vsk->transport)) + return 0; + return vsk->transport->stream_has_space(vsk); } EXPORT_SYMBOL_GPL(vsock_stream_has_space); @@ -894,9 +922,22 @@ void vsock_data_ready(struct sock *sk) } EXPORT_SYMBOL_GPL(vsock_data_ready); +/* Dummy callback required by sockmap. + * See unconditional call of saved_close() in sock_map_close(). 
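The comment ending the line above gives the reason a do-nothing close handler is installed at all: sock_map_close() invokes the saved protocol close() unconditionally, so the slot must always point at something callable. The sketch below models that calling convention with plain function pointers; it is illustrative userspace code, and the struct and function names are invented, not kernel APIs.

    #include <stdio.h>

    struct proto_model {
        void (*close)(long timeout);
    };

    static void noop_close(long timeout)
    {
        (void)timeout;    /* nothing to do; real teardown happens elsewhere */
    }

    static void saved_close_caller(const struct proto_model *p)
    {
        /* No NULL check here, mirroring the unconditional call pattern. */
        p->close(0);
    }

    int main(void)
    {
        struct proto_model vsock_like = { .close = noop_close };

        saved_close_caller(&vsock_like);
        puts("unconditional close call returned safely");
        return 0;
    }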
+ */ +static void vsock_close(struct sock *sk, long timeout) +{ +} + static int vsock_release(struct socket *sock) { - __vsock_release(sock->sk, 0); + struct sock *sk = sock->sk; + + if (!sk) + return 0; + + sk->sk_prot->close(sk, 0); + __vsock_release(sk, 0); sock->sk = NULL; sock->state = SS_FREE; @@ -1396,7 +1437,11 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr, timeout = vsk->connect_timeout; prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); - while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) { + /* If the socket is already closing or it is in an error state, there + * is no point in waiting. + */ + while (sk->sk_state != TCP_ESTABLISHED && + sk->sk_state != TCP_CLOSING && sk->sk_err == 0) { if (flags & O_NONBLOCK) { /* If we're not going to block, we schedule a timeout * function to generate a timeout on the connection diff --git a/net/vmw_vsock/diag.c b/net/vmw_vsock/diag.c index a2823b1c5e28..6efa9eb93336 100644 --- a/net/vmw_vsock/diag.c +++ b/net/vmw_vsock/diag.c @@ -157,6 +157,7 @@ static int vsock_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) } static const struct sock_diag_handler vsock_diag_handler = { + .owner = THIS_MODULE, .family = AF_VSOCK, .dump = vsock_diag_handler_dump, }; diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index b22dc7bed218..a65da57fe26f 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -26,6 +26,9 @@ /* Threshold for detecting small packets to copy */ #define GOOD_COPY_LEN 128 +static void virtio_transport_cancel_close_work(struct vsock_sock *vsk, + bool cancel_timeout); + static const struct virtio_transport * virtio_transport_get_ops(struct vsock_sock *vsk) { @@ -826,6 +829,8 @@ void virtio_transport_destruct(struct vsock_sock *vsk) { struct virtio_vsock_sock *vvs = vsk->trans; + virtio_transport_cancel_close_work(vsk, true); + kfree(vvs); vsk->trans = NULL; } @@ -910,6 +915,22 @@ static void virtio_transport_wait_close(struct sock *sk, long timeout) } } +static void virtio_transport_cancel_close_work(struct vsock_sock *vsk, + bool cancel_timeout) +{ + struct sock *sk = sk_vsock(vsk); + + if (vsk->close_work_scheduled && + (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) { + vsk->close_work_scheduled = false; + + virtio_transport_remove_sock(vsk); + + /* Release refcnt obtained when we scheduled the timeout */ + sock_put(sk); + } +} + static void virtio_transport_do_close(struct vsock_sock *vsk, bool cancel_timeout) { @@ -921,15 +942,7 @@ static void virtio_transport_do_close(struct vsock_sock *vsk, sk->sk_state = TCP_CLOSING; sk->sk_state_change(sk); - if (vsk->close_work_scheduled && - (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) { - vsk->close_work_scheduled = false; - - virtio_transport_remove_sock(vsk); - - /* Release refcnt obtained when we scheduled the timeout */ - sock_put(sk); - } + virtio_transport_cancel_close_work(vsk, cancel_timeout); } static void virtio_transport_close_timeout(struct work_struct *work) @@ -1220,6 +1233,14 @@ virtio_transport_recv_listen(struct sock *sk, struct sk_buff *skb, return -ENOMEM; } + /* __vsock_release() might have already flushed accept_queue. + * Subsequent enqueues would lead to a memory leak. 
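The comment above ends with the key point of the virtio_transport_recv_listen() change: once __vsock_release() has flushed a listener's accept queue, any later enqueue would be unreachable and therefore leak. A minimal userspace model of that guard follows; all names are invented and -1 stands in for -ESHUTDOWN.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pending { struct pending *next; };

    static struct pending *accept_queue;
    static bool listener_shut_down;      /* mirrors sk_shutdown == SHUTDOWN_MASK */

    static int enqueue_pending(struct pending *p)
    {
        if (!p)
            return -1;
        if (listener_shut_down) {
            free(p);                     /* nobody will ever dequeue it */
            return -1;                   /* stands in for -ESHUTDOWN */
        }
        p->next = accept_queue;
        accept_queue = p;
        return 0;
    }

    static void release_listener(void)
    {
        listener_shut_down = true;
        while (accept_queue) {           /* flush, like __vsock_release() */
            struct pending *p = accept_queue;

            accept_queue = p->next;
            free(p);
        }
    }

    int main(void)
    {
        enqueue_pending(calloc(1, sizeof(struct pending)));
        release_listener();

        /* A request racing in after release must be rejected, not queued. */
        printf("late enqueue: %d\n", enqueue_pending(calloc(1, sizeof(struct pending))));
        return 0;
    }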
+ */ + if (sk->sk_shutdown == SHUTDOWN_MASK) { + virtio_transport_reset_no_sock(t, skb); + return -ESHUTDOWN; + } + child = vsock_create_connected(sk); if (!child) { virtio_transport_reset_no_sock(t, skb); @@ -1326,8 +1347,11 @@ void virtio_transport_recv_pkt(struct virtio_transport *t, lock_sock(sk); - /* Check if sk has been closed before lock_sock */ - if (sock_flag(sk, SOCK_DONE)) { + /* Check if sk has been closed or assigned to another transport before + * lock_sock (note: listener sockets are not assigned to any transport) + */ + if (sock_flag(sk, SOCK_DONE) || + (sk->sk_state != TCP_LISTEN && vsk->transport != &t->transport)) { (void)virtio_transport_reset_no_sock(t, skb); release_sock(sk); sock_put(sk); diff --git a/net/wireless/core.c b/net/wireless/core.c index 2bed30621fa6..74904f88edfa 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1146,6 +1146,13 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev) { struct cfg80211_internal_bss *scan, *tmp; struct cfg80211_beacon_registration *reg, *treg; + unsigned long flags; + + spin_lock_irqsave(&rdev->wiphy_work_lock, flags); + WARN_ON(!list_empty(&rdev->wiphy_work_list)); + spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); + cancel_work_sync(&rdev->wiphy_work); + rfkill_destroy(rdev->wiphy.rfkill); list_for_each_entry_safe(reg, treg, &rdev->beacon_registrations, list) { list_del(®->list); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 3e1c4e23484d..3ff2fe98a974 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -806,7 +806,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MLO_LINKS] = NLA_POLICY_NESTED_ARRAY(nl80211_policy), [NL80211_ATTR_MLO_LINK_ID] = - NLA_POLICY_RANGE(NLA_U8, 0, IEEE80211_MLD_MAX_NUM_LINKS), + NLA_POLICY_RANGE(NLA_U8, 0, IEEE80211_MLD_MAX_NUM_LINKS - 1), [NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN), [NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT }, @@ -4050,6 +4050,11 @@ static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags) if (flags[flag]) *mntrflags |= (1<bss_list, list) { struct cfg80211_bss *res = &intbss->pub; const struct cfg80211_bss_ies *ies; + const struct element *ssid_elem; + struct cfg80211_colocated_ap *entry; + u32 s_ssid_tmp; + int ret; ies = rcu_access_pointer(res->ies); count += cfg80211_parse_colocated_ap(ies, &coloc_ap_list); + + /* In case the scan request specified a specific BSSID + * and the BSS is found and operating on 6GHz band then + * add this AP to the collocated APs list. + * This is relevant for ML probe requests when the lower + * band APs have not been discovered. 
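The comment above spells out when a scanned BSS is force-added to the colocated-AP list: the scan asked for one specific BSSID, that BSS was found, and it operates on 6 GHz. The helper below restates those three conditions as a standalone predicate; the enum, constants and function names are invented for illustration, only the logic mirrors the hunk.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    enum band_model { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ };

    static bool is_broadcast(const unsigned char *a)
    {
        static const unsigned char bcast[ETH_ALEN] = {
            0xff, 0xff, 0xff, 0xff, 0xff, 0xff
        };
        return memcmp(a, bcast, ETH_ALEN) == 0;
    }

    static bool should_add_colocated(const unsigned char *req_bssid,
                                     const unsigned char *res_bssid,
                                     enum band_model band)
    {
        if (is_broadcast(req_bssid))                 /* wildcard scan: nothing to do */
            return false;
        if (memcmp(req_bssid, res_bssid, ETH_ALEN))  /* not the BSS we asked for */
            return false;
        return band == BAND_6GHZ;                    /* only 6 GHz results qualify */
    }

    int main(void)
    {
        unsigned char ap[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

        printf("%d\n", should_add_colocated(ap, ap, BAND_6GHZ));  /* 1 */
        printf("%d\n", should_add_colocated(ap, ap, BAND_5GHZ));  /* 0 */
        return 0;
    }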
+ */ + if (is_broadcast_ether_addr(rdev_req->bssid) || + !ether_addr_equal(rdev_req->bssid, res->bssid) || + res->channel->band != NL80211_BAND_6GHZ) + continue; + + ret = cfg80211_calc_short_ssid(ies, &ssid_elem, + &s_ssid_tmp); + if (ret) + continue; + + entry = kzalloc(sizeof(*entry), GFP_ATOMIC); + if (!entry) + continue; + + memcpy(entry->bssid, res->bssid, ETH_ALEN); + entry->short_ssid = s_ssid_tmp; + memcpy(entry->ssid, ssid_elem->data, + ssid_elem->datalen); + entry->ssid_len = ssid_elem->datalen; + entry->short_ssid_valid = true; + entry->center_freq = res->channel->center_freq; + + list_add_tail(&entry->list, &coloc_ap_list); + count++; } spin_unlock_bh(&rdev->bss_lock); } diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c index ed6c71826d31..21d5fdba47c4 100644 --- a/net/xdp/xsk_buff_pool.c +++ b/net/xdp/xsk_buff_pool.c @@ -102,7 +102,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs, if (pool->unaligned) pool->free_heads[i] = xskb; else - xp_init_xskb_addr(xskb, pool, i * pool->chunk_size); + xp_init_xskb_addr(xskb, pool, (u64)i * pool->chunk_size); } return pool; @@ -356,10 +356,9 @@ void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs) return; } - if (!refcount_dec_and_test(&dma_map->users)) - return; + if (refcount_dec_and_test(&dma_map->users)) + __xp_dma_unmap(dma_map, attrs); - __xp_dma_unmap(dma_map, attrs); kvfree(pool->dma_pages); pool->dma_pages_cnt = 0; pool->dev = NULL; diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c index 22b36c8143cf..e1012bfec720 100644 --- a/net/xdp/xsk_diag.c +++ b/net/xdp/xsk_diag.c @@ -194,6 +194,7 @@ static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr) } static const struct sock_diag_handler xsk_diag_handler = { + .owner = THIS_MODULE, .family = AF_XDP, .dump = xsk_diag_handler_dump, }; diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c index acc8e52a4f5f..47cb92c4d541 100644 --- a/net/xdp/xskmap.c +++ b/net/xdp/xskmap.c @@ -216,7 +216,7 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key) struct xsk_map *m = container_of(map, struct xsk_map, map); struct xdp_sock __rcu **map_entry; struct xdp_sock *old_xs; - int k = *(u32 *)key; + u32 k = *(u32 *)key; if (k >= map->max_entries) return -EINVAL; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 07a7ee43b8ae..c59c548d8fc1 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -738,7 +738,7 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb) skb->encapsulation = 1; if (skb_is_gso(skb)) { - if (skb->inner_protocol) + if (skb->inner_protocol && x->props.mode == XFRM_MODE_TUNNEL) return xfrm_output_gso(net, sk, skb); skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index a022f4984687..e015ff225b27 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1597,6 +1597,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) struct xfrm_policy *delpol; struct hlist_head *chain; + /* Sanitize mark before store */ + policy->mark.v &= policy->mark.m; + spin_lock_bh(&net->xfrm.xfrm_policy_lock); chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); if (chain) diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index ce56d659c55a..7f52bb2e14c1 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -714,10 +714,12 @@ static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff oseq += skb_shinfo(skb)->gso_segs; } - if (unlikely(xo->seq.low < 
replay_esn->oseq)) { - XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi; - xo->seq.hi = oseq_hi; - replay_esn->oseq_hi = oseq_hi; + if (unlikely(oseq < replay_esn->oseq)) { + replay_esn->oseq_hi = ++oseq_hi; + if (xo->seq.low < replay_esn->oseq) { + XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi; + xo->seq.hi = oseq_hi; + } if (replay_esn->oseq_hi == 0) { replay_esn->oseq--; replay_esn->oseq_hi--; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 2f4cf976b59a..58c53bb1c583 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -694,9 +694,6 @@ int __xfrm_state_delete(struct xfrm_state *x) net->xfrm.state_num--; spin_unlock(&net->xfrm.xfrm_state_lock); - if (x->encap_sk) - sock_put(rcu_dereference_raw(x->encap_sk)); - xfrm_dev_state_delete(x); /* All xfrm_state objects are created by xfrm_state_alloc. @@ -1278,6 +1275,9 @@ static void __xfrm_state_insert(struct xfrm_state *x) list_add(&x->km.all, &net->xfrm.state_all); + /* Sanitize mark before store */ + x->mark.v &= x->mark.m; + h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, x->props.reqid, x->props.family); hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h); diff --git a/rust/Makefile b/rust/Makefile index 28ba3b9ee18d..ec737bad7fbb 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -344,8 +344,11 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@) rust-analyzer: - $(Q)$(srctree)/scripts/generate_rust_analyzer.py $(srctree) $(objtree) \ - $(RUST_LIB_SRC) > $(objtree)/rust-project.json + $(Q)$(srctree)/scripts/generate_rust_analyzer.py \ + --cfgs='core=$(core-cfgs)' --cfgs='alloc=$(alloc-cfgs)' \ + $(abs_srctree) $(abs_objtree) \ + $(RUST_LIB_SRC) $(KBUILD_EXTMOD) > \ + $(if $(KBUILD_EXTMOD),$(extmod_prefix),$(objtree))/rust-project.json $(obj)/core.o: private skip_clippy = 1 $(obj)/core.o: private skip_flags = -Dunreachable_pub diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 727da3c5879b..77bf18cfdae7 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -434,7 +434,7 @@ $(obj)/%.o: $(src)/%.c @echo " CLANG-bpf " $@ $(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \ -I$(obj) -I$(srctree)/tools/testing/selftests/bpf/ \ - -I$(LIBBPF_INCLUDE) \ + -I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES) \ -D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \ -D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \ -Wno-gnu-variable-sized-type-not-at-end \ diff --git a/samples/bpf/test_cgrp2_sock.c b/samples/bpf/test_cgrp2_sock.c index a0811df888f4..8ca2a445ffa1 100644 --- a/samples/bpf/test_cgrp2_sock.c +++ b/samples/bpf/test_cgrp2_sock.c @@ -178,8 +178,10 @@ static int show_sockopts(int family) return 1; } - if (get_bind_to_device(sd, name, sizeof(name)) < 0) + if (get_bind_to_device(sd, name, sizeof(name)) < 0) { + close(sd); return 1; + } mark = get_somark(sd); prio = get_priority(sd); diff --git a/samples/bpf/xdp_adjust_tail_kern.c b/samples/bpf/xdp_adjust_tail_kern.c index ffdd548627f0..da67bcad1c63 100644 --- a/samples/bpf/xdp_adjust_tail_kern.c +++ b/samples/bpf/xdp_adjust_tail_kern.c @@ -57,6 +57,7 @@ static __always_inline void swap_mac(void *data, struct ethhdr *orig_eth) static __always_inline __u16 csum_fold_helper(__u32 csum) { + csum = (csum & 0xffff) + (csum >> 16); return ~((csum & 0xffff) + (csum >> 16)); } diff --git a/samples/ftrace/sample-trace-array.c b/samples/ftrace/sample-trace-array.c index 6aba02a31c96..77685a7eb767 100644 --- 
a/samples/ftrace/sample-trace-array.c +++ b/samples/ftrace/sample-trace-array.c @@ -112,7 +112,7 @@ static int __init sample_trace_array_init(void) /* * If context specific per-cpu buffers havent already been allocated. */ - trace_printk_init_buffers(); + trace_array_init_printk(tr); simple_tsk = kthread_run(simple_thread, NULL, "sample-instance"); if (IS_ERR(simple_tsk)) { diff --git a/samples/landlock/sandboxer.c b/samples/landlock/sandboxer.c index f29bb3c72230..ce9b77bc167b 100644 --- a/samples/landlock/sandboxer.c +++ b/samples/landlock/sandboxer.c @@ -65,6 +65,9 @@ static int parse_path(char *env_path, const char ***const path_list) } } *path_list = malloc(num_paths * sizeof(**path_list)); + if (!*path_list) + return -1; + for (i = 0; i < num_paths; i++) (*path_list)[i] = strsep(&env_path, ENV_PATH_TOKEN); @@ -99,6 +102,10 @@ static int populate_ruleset(const char *const env_var, const int ruleset_fd, env_path_name = strdup(env_path_name); unsetenv(env_var); num_paths = parse_path(env_path_name, &path_list); + if (num_paths < 0) { + fprintf(stderr, "Failed to allocate memory\n"); + goto out_free_name; + } if (num_paths == 1 && path_list[0][0] == '\0') { /* * Allows to not use all possible restrictions (e.g. use diff --git a/samples/pktgen/pktgen_sample01_simple.sh b/samples/pktgen/pktgen_sample01_simple.sh index 09a92ea963f9..c8e75888a9c2 100755 --- a/samples/pktgen/pktgen_sample01_simple.sh +++ b/samples/pktgen/pktgen_sample01_simple.sh @@ -72,7 +72,7 @@ if [ -n "$DST_PORT" ]; then pg_set $DEV "udp_dst_max $UDP_DST_MAX" fi -[ ! -z "$UDP_CSUM" ] && pg_set $dev "flag UDPCSUM" +[ ! -z "$UDP_CSUM" ] && pg_set $DEV "flag UDPCSUM" # Setup random UDP port src range pg_set $DEV "flag UDPSRC_RND" diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c index 608c4ae3b08a..ecc7db237f2e 100644 --- a/samples/trace_events/trace-events-sample.c +++ b/samples/trace_events/trace-events-sample.c @@ -50,7 +50,7 @@ static void do_simple_thread_func(int cnt, const char *fmt, ...) trace_foo_with_template_print("I have to be different", cnt); - trace_foo_rel_loc("Hello __rel_loc", cnt, bitmask); + trace_foo_rel_loc("Hello __rel_loc", cnt, bitmask, current->cpus_ptr); } static void simple_thread_func(int cnt) diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h index 1a92226202fc..06be777b3b14 100644 --- a/samples/trace_events/trace-events-sample.h +++ b/samples/trace_events/trace-events-sample.h @@ -200,6 +200,16 @@ * * __assign_bitmask(target_cpus, cpumask_bits(bar), nr_cpumask_bits); * + * __cpumask: This is pretty much the same as __bitmask but is specific for + * CPU masks. The type displayed to the user via the format files will + * be "cpumaks_t" such that user space may deal with them differently + * if they choose to do so, and the bits is always set to nr_cpumask_bits. + * + * __cpumask(target_cpu) + * + * To assign a cpumask, use the __assign_cpumask() helper macro. + * + * __assign_cpumask(target_cpus, cpumask_bits(bar)); * * fast_assign: This is a C like function that is used to store the items * into the ring buffer. A special variable called "__entry" will be the @@ -212,8 +222,8 @@ * This is also used to print out the data from the trace files. * Again, the __entry macro is used to access the data from the ring buffer. * - * Note, __dynamic_array, __string, and __bitmask require special helpers - * to access the data. 
+ * Note, __dynamic_array, __string, __bitmask and __cpumask require special + * helpers to access the data. * * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo) * Use __get_dynamic_array_len(foo) to get the length of the array @@ -226,6 +236,8 @@ * * For __bitmask(target_cpus, nr_cpumask_bits) use __get_bitmask(target_cpus) * + * For __cpumask(target_cpus) use __get_cpumask(target_cpus) + * * * Note, that for both the assign and the printk, __entry is the handler * to the data structure in the ring buffer, and is defined by the @@ -288,7 +300,9 @@ TRACE_EVENT(foo_bar, __dynamic_array(int, list, __length_of(lst)) __string( str, string ) __bitmask( cpus, num_possible_cpus() ) + __cpumask( cpum ) __vstring( vstr, fmt, va ) + __string_len( lstr, foo, bar / 2 < strlen(foo) ? bar / 2 : strlen(foo) ) ), TP_fast_assign( @@ -297,11 +311,14 @@ TRACE_EVENT(foo_bar, memcpy(__get_dynamic_array(list), lst, __length_of(lst) * sizeof(int)); __assign_str(str, string); + __assign_str(lstr, foo); __assign_vstr(vstr, fmt, va); __assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus()); + __assign_cpumask(cpum, cpumask_bits(mask)); ), - TP_printk("foo %s %d %s %s %s %s (%s) %s", __entry->foo, __entry->bar, + TP_printk("foo %s %d %s %s %s %s %s %s (%s) (%s) %s [%d] %*pbl", + __entry->foo, __entry->bar, /* * Notice here the use of some helper functions. This includes: @@ -345,7 +362,17 @@ TRACE_EVENT(foo_bar, __print_array(__get_dynamic_array(list), __get_dynamic_array_len(list) / sizeof(int), sizeof(int)), - __get_str(str), __get_bitmask(cpus), __get_str(vstr)) + +/* A shortcut is to use __print_dynamic_array for dynamic arrays */ + + __print_dynamic_array(list, sizeof(int)), + + __get_str(str), __get_str(lstr), + __get_bitmask(cpus), __get_cpumask(cpum), + __get_str(vstr), + __get_dynamic_array_len(cpus), + __get_dynamic_array_len(cpus), + __get_dynamic_array(cpus)) ); /* @@ -542,15 +569,16 @@ DEFINE_EVENT_PRINT(foo_template, foo_with_template_print, TRACE_EVENT(foo_rel_loc, - TP_PROTO(const char *foo, int bar, unsigned long *mask), + TP_PROTO(const char *foo, int bar, unsigned long *mask, const cpumask_t *cpus), - TP_ARGS(foo, bar, mask), + TP_ARGS(foo, bar, mask, cpus), TP_STRUCT__entry( __rel_string( foo, foo ) __field( int, bar ) __rel_bitmask( bitmask, BITS_PER_BYTE * sizeof(unsigned long) ) + __rel_cpumask( cpumask ) ), TP_fast_assign( @@ -558,10 +586,12 @@ TRACE_EVENT(foo_rel_loc, __entry->bar = bar; __assign_rel_bitmask(bitmask, mask, BITS_PER_BYTE * sizeof(unsigned long)); + __assign_rel_cpumask(cpumask, cpus); ), - TP_printk("foo_rel_loc %s, %d, %s", __get_rel_str(foo), __entry->bar, - __get_rel_bitmask(bitmask)) + TP_printk("foo_rel_loc %s, %d, %s, %s", __get_rel_str(foo), __entry->bar, + __get_rel_bitmask(bitmask), + __get_rel_cpumask(cpumask)) ); #endif diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn index fa5ef4180688..0971b1222e47 100644 --- a/scripts/Makefile.extrawarn +++ b/scripts/Makefile.extrawarn @@ -38,6 +38,10 @@ KBUILD_CFLAGS += -Wno-sign-compare KBUILD_CFLAGS += -Wno-type-limits KBUILD_CFLAGS += -Wno-shift-negative-value +ifdef CONFIG_CC_IS_CLANG +KBUILD_CFLAGS += -Wno-enum-enum-conversion +endif + KBUILD_CPPFLAGS += -DKBUILD_EXTRA_WARN1 else @@ -66,7 +70,6 @@ KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare KBUILD_CFLAGS += $(call cc-disable-warning, unaligned-access) KBUILD_CFLAGS += $(call cc-disable-warning, cast-function-type-strict) KBUILD_CFLAGS += -Wno-enum-compare-conditional -KBUILD_CFLAGS += -Wno-enum-enum-conversion 
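For context on the -Wno-enum-enum-conversion move above (now applied only for clang at W=1), the snippet below shows the kind of mixed-enum arithmetic that clang can flag under that option while gcc stays silent. The enum names are invented placeholders, not kernel types, and this is only an assumed illustration of the warning class.

    #include <stdio.h>

    enum dma_dir_model { DIR_TO_DEVICE = 1, DIR_FROM_DEVICE = 2 };
    enum flags_model   { FLAG_ATOMIC = 4, FLAG_NOWARN = 8 };

    static int mixed_enum_math(void)
    {
        /* Bitwise/arithmetic mixing of two unrelated enum types is the
         * construct -Wenum-enum-conversion targets. */
        return DIR_TO_DEVICE | FLAG_ATOMIC;
    }

    int main(void)
    {
        printf("%d\n", mixed_enum_math());
        return 0;
    }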
endif endif diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib index fcc15b9546c1..c016b57af438 100644 --- a/scripts/Makefile.lib +++ b/scripts/Makefile.lib @@ -263,7 +263,7 @@ objtool-args-$(CONFIG_SLS) += --sls objtool-args-$(CONFIG_STACK_VALIDATION) += --stackval objtool-args-$(CONFIG_HAVE_STATIC_CALL_INLINE) += --static-call objtool-args-$(CONFIG_HAVE_UACCESS_VALIDATION) += --uaccess -objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable +objtool-args-$(or $(CONFIG_GCOV_KERNEL),$(CONFIG_KCOV)) += --no-unreachable objtool-args = $(objtool-args-y) \ $(if $(delay-objtool), --link) \ @@ -472,10 +472,10 @@ quiet_cmd_lzo_with_size = LZO $@ cmd_lzo_with_size = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@ quiet_cmd_lz4 = LZ4 $@ - cmd_lz4 = cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout > $@ + cmd_lz4 = cat $(real-prereqs) | $(LZ4) -l -9 - - > $@ quiet_cmd_lz4_with_size = LZ4 $@ - cmd_lz4_with_size = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \ + cmd_lz4_with_size = { cat $(real-prereqs) | $(LZ4) -l -9 - -; \ $(size_append); } > $@ quiet_cmd_lz4c = LZ4C $@ diff --git a/scripts/config b/scripts/config index ff88e2faefd3..ea475c07de28 100755 --- a/scripts/config +++ b/scripts/config @@ -32,6 +32,7 @@ commands: Disable option directly after other option --module-after|-M beforeopt option Turn option into module directly after other option + --refresh Refresh the config using old settings commands can be repeated multiple times @@ -124,16 +125,22 @@ undef_var() { txt_delete "^# $name is not set" "$FN" } -if [ "$1" = "--file" ]; then - FN="$2" - if [ "$FN" = "" ] ; then - usage +FN=.config +CMDS=() +while [[ $# -gt 0 ]]; do + if [ "$1" = "--file" ]; then + if [ "$2" = "" ]; then + usage + fi + FN="$2" + shift 2 + else + CMDS+=("$1") + shift fi - shift 2 -else - FN=.config -fi +done +set -- "${CMDS[@]}" if [ "$1" = "" ] ; then usage fi @@ -217,9 +224,8 @@ while [ "$1" != "" ] ; do set_var "${CONFIG_}$B" "${CONFIG_}$B=m" "${CONFIG_}$A" ;; - # undocumented because it ignores --file (fixme) --refresh) - yes "" | make oldconfig + yes "" | make oldconfig KCONFIG_CONFIG=$FN ;; *) diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py index 75bb611bd751..1093c540e324 100755 --- a/scripts/generate_rust_analyzer.py +++ b/scripts/generate_rust_analyzer.py @@ -6,10 +6,19 @@ import argparse import json import logging +import os import pathlib import sys -def generate_crates(srctree, objtree, sysroot_src): +def args_crates_cfgs(cfgs): + crates_cfgs = {} + for cfg in cfgs: + crate, vals = cfg.split("=", 1) + crates_cfgs[crate] = vals.replace("--cfg", "").split() + + return crates_cfgs + +def generate_crates(srctree, objtree, sysroot_src, external_src, cfgs): # Generate the configuration list. cfg = [] with open(objtree / "include" / "generated" / "rustc_cfg") as fd: @@ -23,6 +32,7 @@ def generate_crates(srctree, objtree, sysroot_src): # Avoid O(n^2) iterations by keeping a map of indexes. crates = [] crates_indexes = {} + crates_cfgs = args_crates_cfgs(cfgs) def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=True, is_proc_macro=False): crates_indexes[display_name] = len(crates) @@ -39,13 +49,26 @@ def generate_crates(srctree, objtree, sysroot_src): } }) - # First, the ones in `rust/` since they are a bit special. 
- append_crate( - "core", - sysroot_src / "core" / "src" / "lib.rs", - [], - is_workspace_member=False, - ) + def append_sysroot_crate( + display_name, + deps, + cfg=[], + ): + append_crate( + display_name, + sysroot_src / display_name / "src" / "lib.rs", + deps, + cfg, + is_workspace_member=False, + ) + + # NB: sysroot crates reexport items from one another so setting up our transitive dependencies + # here is important for ensuring that rust-analyzer can resolve symbols. The sources of truth + # for this dependency graph are `(sysroot_src / crate / "Cargo.toml" for crate in crates)`. + append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", [])) + append_sysroot_crate("alloc", ["core"]) + append_sysroot_crate("std", ["alloc", "core"]) + append_sysroot_crate("proc_macro", ["core", "std"]) append_crate( "compiler_builtins", @@ -57,15 +80,16 @@ def generate_crates(srctree, objtree, sysroot_src): "alloc", srctree / "rust" / "alloc" / "lib.rs", ["core", "compiler_builtins"], + cfg=crates_cfgs.get("alloc", []), ) append_crate( "macros", srctree / "rust" / "macros" / "lib.rs", - [], + ["std", "proc_macro"], is_proc_macro=True, ) - crates[-1]["proc_macro_dylib_path"] = "rust/libmacros.so" + crates[-1]["proc_macro_dylib_path"] = f"{objtree}/rust/libmacros.so" append_crate( "bindings", @@ -89,16 +113,26 @@ def generate_crates(srctree, objtree, sysroot_src): "exclude_dirs": [], } + def is_root_crate(build_file, target): + try: + return f"{target}.o" in open(build_file).read() + except FileNotFoundError: + return False + # Then, the rest outside of `rust/`. # # We explicitly mention the top-level folders we want to cover. - for folder in ("samples", "drivers"): - for path in (srctree / folder).rglob("*.rs"): + extra_dirs = map(lambda dir: srctree / dir, ("samples", "drivers")) + if external_src is not None: + extra_dirs = [external_src] + for folder in extra_dirs: + for path in folder.rglob("*.rs"): logging.info("Checking %s", path) name = path.name.replace(".rs", "") # Skip those that are not crate roots. 
- if f"{name}.o" not in open(path.parent / "Makefile").read(): + if not is_root_crate(path.parent / "Makefile", name) and \ + not is_root_crate(path.parent / "Kbuild", name): continue logging.info("Adding %s", name) @@ -114,9 +148,11 @@ def generate_crates(srctree, objtree, sysroot_src): def main(): parser = argparse.ArgumentParser() parser.add_argument('--verbose', '-v', action='store_true') + parser.add_argument('--cfgs', action='append', default=[]) parser.add_argument("srctree", type=pathlib.Path) parser.add_argument("objtree", type=pathlib.Path) parser.add_argument("sysroot_src", type=pathlib.Path) + parser.add_argument("exttree", type=pathlib.Path, nargs="?") args = parser.parse_args() logging.basicConfig( @@ -125,7 +161,7 @@ def main(): ) rust_project = { - "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src), + "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src, args.exttree, args.cfgs), "sysroot_src": str(args.sysroot_src), } diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c index f5dfdb9d80e9..6b0eb3898e4e 100644 --- a/scripts/genksyms/genksyms.c +++ b/scripts/genksyms/genksyms.c @@ -241,6 +241,7 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type, "unchanged\n"); } sym->is_declared = 1; + free_list(defn, NULL); return sym; } else if (!sym->is_declared) { if (sym->is_override && flag_preserve) { @@ -249,6 +250,7 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type, print_type_name(type, name); fprintf(stderr, " modversion change\n"); sym->is_declared = 1; + free_list(defn, NULL); return sym; } else { status = is_unknown_symbol(sym) ? @@ -256,6 +258,7 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type, } } else { error_with_pos("redefinition of %s", name); + free_list(defn, NULL); return sym; } break; @@ -271,11 +274,15 @@ static struct symbol *__add_symbol(const char *name, enum symbol_type type, break; } } + + free_list(sym->defn, NULL); + free(sym->name); + free(sym); --nsyms; } sym = xmalloc(sizeof(*sym)); - sym->name = name; + sym->name = xstrdup(name); sym->type = type; sym->defn = defn; sym->expansion_trail = NULL; @@ -482,7 +489,7 @@ static void read_reference(FILE *f) defn = def; def = read_node(f); } - subsym = add_reference_symbol(xstrdup(sym->string), sym->tag, + subsym = add_reference_symbol(sym->string, sym->tag, defn, is_extern); subsym->is_override = is_override; free_node(sym); diff --git a/scripts/genksyms/genksyms.h b/scripts/genksyms/genksyms.h index 21ed2ec2d98c..5621533dcb8e 100644 --- a/scripts/genksyms/genksyms.h +++ b/scripts/genksyms/genksyms.h @@ -32,7 +32,7 @@ struct string_list { struct symbol { struct symbol *hash_next; - const char *name; + char *name; enum symbol_type type; struct string_list *defn; struct symbol *expansion_trail; diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y index 8e9b5e69e8f0..689cb6bb40b6 100644 --- a/scripts/genksyms/parse.y +++ b/scripts/genksyms/parse.y @@ -152,14 +152,19 @@ simple_declaration: ; init_declarator_list_opt: - /* empty */ { $$ = NULL; } - | init_declarator_list + /* empty */ { $$ = NULL; } + | init_declarator_list { free_list(decl_spec, NULL); $$ = $1; } ; init_declarator_list: init_declarator { struct string_list *decl = *$1; *$1 = NULL; + + /* avoid sharing among multiple init_declarators */ + if (decl_spec) + decl_spec = copy_list_range(decl_spec, NULL); + add_symbol(current_name, is_typedef ? 
SYM_TYPEDEF : SYM_NORMAL, decl, is_extern); current_name = NULL; @@ -170,6 +175,11 @@ init_declarator_list: *$3 = NULL; free_list(*$2, NULL); *$2 = decl_spec; + + /* avoid sharing among multiple init_declarators */ + if (decl_spec) + decl_spec = copy_list_range(decl_spec, NULL); + add_symbol(current_name, is_typedef ? SYM_TYPEDEF : SYM_NORMAL, decl, is_extern); current_name = NULL; @@ -472,12 +482,12 @@ enumerator_list: enumerator: IDENT { - const char *name = strdup((*$1)->string); + const char *name = (*$1)->string; add_symbol(name, SYM_ENUM_CONST, NULL, 0); } | IDENT '=' EXPRESSION_PHRASE { - const char *name = strdup((*$1)->string); + const char *name = (*$1)->string; struct string_list *expr = copy_list_range(*$3, *$2); add_symbol(name, SYM_ENUM_CONST, expr, 0); } diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c index 33d19e419908..662a5e7c37c2 100644 --- a/scripts/kconfig/conf.c +++ b/scripts/kconfig/conf.c @@ -827,6 +827,9 @@ int main(int ac, char **av) break; } + if (conf_errors()) + exit(1); + if (sync_kconfig) { name = getenv("KCONFIG_NOSILENTUPDATE"); if (name && *name) { @@ -890,6 +893,9 @@ int main(int ac, char **av) break; } + if (sym_dep_errors()) + exit(1); + if (input_mode == savedefconfig) { if (conf_write_defconfig(defconfig_file)) { fprintf(stderr, "n*** Error while saving defconfig to: %s\n\n", diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index 992575f1e976..f214e8d3762e 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -155,6 +155,13 @@ static void conf_message(const char *fmt, ...) static const char *conf_filename; static int conf_lineno, conf_warnings; +bool conf_errors(void) +{ + if (conf_warnings) + return getenv("KCONFIG_WERROR"); + return false; +} + static void conf_warning(const char *fmt, ...) 
{ va_list ap; @@ -346,10 +353,12 @@ int conf_read_simple(const char *name, int def) FILE *in = NULL; char *line = NULL; size_t line_asize = 0; - char *p, *p2; + char *p, *p2, *val; struct symbol *sym; int i, def_flags; + const char *warn_unknown, *sym_name; + warn_unknown = getenv("KCONFIG_WARN_UNKNOWN_SYMBOLS"); if (name) { in = zconf_fopen(name); } else { @@ -382,10 +391,12 @@ int conf_read_simple(const char *name, int def) *p = '\0'; - in = zconf_fopen(env); + name = env; + + in = zconf_fopen(name); if (in) { conf_message("using defaults found in %s", - env); + name); goto load; } @@ -424,71 +435,34 @@ load: while (compat_getline(&line, &line_asize, in) != -1) { conf_lineno++; - sym = NULL; if (line[0] == '#') { - if (memcmp(line + 2, CONFIG_, strlen(CONFIG_))) + if (line[1] != ' ') continue; - p = strchr(line + 2 + strlen(CONFIG_), ' '); + p = line + 2; + if (memcmp(p, CONFIG_, strlen(CONFIG_))) + continue; + sym_name = p + strlen(CONFIG_); + p = strchr(sym_name, ' '); if (!p) continue; *p++ = 0; if (strncmp(p, "is not set", 10)) continue; - if (def == S_DEF_USER) { - sym = sym_find(line + 2 + strlen(CONFIG_)); - if (!sym) { - conf_set_changed(true); - continue; - } - } else { - sym = sym_lookup(line + 2 + strlen(CONFIG_), 0); - if (sym->type == S_UNKNOWN) - sym->type = S_BOOLEAN; - } - if (sym->flags & def_flags) { - conf_warning("override: reassigning to symbol %s", sym->name); - } - switch (sym->type) { - case S_BOOLEAN: - case S_TRISTATE: - sym->def[def].tri = no; - sym->flags |= def_flags; - break; - default: - ; - } + + val = "n"; } else if (memcmp(line, CONFIG_, strlen(CONFIG_)) == 0) { - p = strchr(line + strlen(CONFIG_), '='); + sym_name = line + strlen(CONFIG_); + p = strchr(sym_name, '='); if (!p) continue; *p++ = 0; + val = p; p2 = strchr(p, '\n'); if (p2) { *p2-- = 0; if (*p2 == '\r') *p2 = 0; } - - sym = sym_find(line + strlen(CONFIG_)); - if (!sym) { - if (def == S_DEF_AUTO) - /* - * Reading from include/config/auto.conf - * If CONFIG_FOO previously existed in - * auto.conf but it is missing now, - * include/config/FOO must be touched. - */ - conf_touch_dep(line + strlen(CONFIG_)); - else - conf_set_changed(true); - continue; - } - - if (sym->flags & def_flags) { - conf_warning("override: reassigning to symbol %s", sym->name); - } - if (conf_set_sym_val(sym, def, def_flags, p)) - continue; } else { if (line[0] != '\r' && line[0] != '\n') conf_warning("unexpected data: %.*s", @@ -497,6 +471,31 @@ load: continue; } + sym = sym_find(sym_name); + if (!sym) { + if (def == S_DEF_AUTO) { + /* + * Reading from include/config/auto.conf. + * If CONFIG_FOO previously existed in auto.conf + * but it is missing now, include/config/FOO + * must be touched. 
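The confdata.c rework around this point folds the two .config line forms into one path: both "CONFIG_FOO=val" and "# CONFIG_FOO is not set" reduce to a symbol name plus a value, with the latter mapped to "n" before the common lookup and assignment code runs. A standalone, simplified sketch of that parsing (not the actual kconfig sources):

    #include <stdio.h>
    #include <string.h>

    #define CONFIG_PREFIX "CONFIG_"

    /* Returns 1 and fills name/val on success, 0 if the line is not a symbol. */
    static int parse_config_line(char *line, char **name, char **val)
    {
        char *p;

        if (line[0] == '#') {
            if (strncmp(line, "# " CONFIG_PREFIX, 2 + strlen(CONFIG_PREFIX)))
                return 0;
            *name = line + 2 + strlen(CONFIG_PREFIX);
            p = strchr(*name, ' ');
            if (!p || strncmp(p + 1, "is not set", 10))
                return 0;
            *p = '\0';
            *val = "n";                      /* "is not set" becomes the value n */
            return 1;
        }
        if (!strncmp(line, CONFIG_PREFIX, strlen(CONFIG_PREFIX))) {
            *name = line + strlen(CONFIG_PREFIX);
            p = strchr(*name, '=');
            if (!p)
                return 0;
            *p = '\0';
            *val = p + 1;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        char a[] = "CONFIG_DEBUG_KERNEL=y";
        char b[] = "# CONFIG_SMP is not set";
        char *name, *val;

        if (parse_config_line(a, &name, &val))
            printf("%s -> %s\n", name, val);   /* DEBUG_KERNEL -> y */
        if (parse_config_line(b, &name, &val))
            printf("%s -> %s\n", name, val);   /* SMP -> n */
        return 0;
    }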
+ */ + conf_touch_dep(sym_name); + } else { + if (warn_unknown) + conf_warning("unknown symbol: %s", sym_name); + + conf_set_changed(true); + } + continue; + } + + if (sym->flags & def_flags) + conf_warning("override: reassigning to symbol %s", sym->name); + + if (conf_set_sym_val(sym, def, def_flags, val)) + continue; + if (sym && sym_is_choice_value(sym)) { struct symbol *cs = prop_get_symbol(sym_get_choice_prop(sym)); switch (sym->def[def].tri) { @@ -519,6 +518,7 @@ load: } free(line); fclose(in); + return 0; } diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h index edd1e617b25c..e4931bde7ca7 100644 --- a/scripts/kconfig/lkc_proto.h +++ b/scripts/kconfig/lkc_proto.h @@ -12,6 +12,7 @@ void conf_set_changed(bool val); bool conf_get_changed(void); void conf_set_changed_callback(void (*fn)(void)); void conf_set_message_callback(void (*fn)(const char *s)); +bool conf_errors(void); /* symbol.c */ extern struct symbol * symbol_hash[SYMBOL_HASHSIZE]; @@ -22,6 +23,7 @@ void print_symbol_for_listconfig(struct symbol *sym); struct symbol ** sym_re_search(const char *pattern); const char * sym_type_name(enum symbol_type type); void sym_calc_value(struct symbol *sym); +bool sym_dep_errors(void); enum symbol_type sym_get_type(struct symbol *sym); bool sym_tristate_within_range(struct symbol *sym,tristate tri); bool sym_set_tristate_value(struct symbol *sym,tristate tri); diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 72da3b8d6f30..151f9938abaa 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -105,8 +105,8 @@ INITFILE=$1 shift; if [ ! -r "$INITFILE" ]; then - echo "The base file '$INITFILE' does not exist. Exit." >&2 - exit 1 + echo "The base file '$INITFILE' does not exist. Creating one..." 
>&2 + touch "$INITFILE" fi MERGE_LIST=$* diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c index 7b1df55b0176..1c0306c9d74e 100644 --- a/scripts/kconfig/symbol.c +++ b/scripts/kconfig/symbol.c @@ -40,6 +40,7 @@ static struct symbol symbol_empty = { struct symbol *modules_sym; static tristate modules_val; +static int sym_warnings; enum symbol_type sym_get_type(struct symbol *sym) { @@ -320,6 +321,15 @@ static void sym_warn_unmet_dep(struct symbol *sym) " Selected by [m]:\n"); fputs(str_get(&gs), stderr); + str_free(&gs); + sym_warnings++; +} + +bool sym_dep_errors(void) +{ + if (sym_warnings) + return getenv("KCONFIG_WERROR"); + return false; } void sym_calc_value(struct symbol *sym) diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 39e2c8883ddd..ba09484ef910 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -741,8 +741,8 @@ static void do_input(char *alias, for (i = min / BITS_PER_LONG; i < max / BITS_PER_LONG + 1; i++) arr[i] = TO_NATIVE(arr[i]); - for (i = min; i < max; i++) - if (arr[i / BITS_PER_LONG] & (1L << (i%BITS_PER_LONG))) + for (i = min; i <= max; i++) + if (arr[i / BITS_PER_LONG] & (1ULL << (i%BITS_PER_LONG))) sprintf(alias + strlen(alias), "%X,*", i); } @@ -808,10 +808,7 @@ static int do_eisa_entry(const char *filename, void *symval, char *alias) { DEF_FIELD_ADDR(symval, eisa_device_id, sig); - if (sig[0]) - sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", *sig); - else - strcat(alias, "*"); + sprintf(alias, EISA_DEVICE_MODALIAS_FMT "*", *sig); return 1; } diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 686eed37f978..c2161d77672d 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -829,7 +829,7 @@ static void check_section(const char *modname, struct elf_info *elf, ".ltext", ".ltext.*" #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \ ".fixup", ".entry.text", ".exception.text", \ - ".coldtext", ".softirqentry.text" + ".coldtext", ".softirqentry.text", ".irqentry.text" #define INIT_SECTIONS ".init.*" #define MEM_INIT_SECTIONS ".meminit.*" @@ -889,7 +889,7 @@ enum mismatch { struct sectioncheck { const char *fromsec[20]; const char *bad_tosec[20]; - const char *good_tosec[20]; + const char *good_tosec[21]; enum mismatch mismatch; void (*handler)(const char *modname, struct elf_info *elf, const struct sectioncheck* const mismatch, diff --git a/scripts/selinux/install_policy.sh b/scripts/selinux/install_policy.sh index 24086793b0d8..db40237e60ce 100755 --- a/scripts/selinux/install_policy.sh +++ b/scripts/selinux/install_policy.sh @@ -6,27 +6,24 @@ if [ `id -u` -ne 0 ]; then exit 1 fi -SF=`which setfiles` -if [ $? -eq 1 ]; then +SF=`which setfiles` || { echo "Could not find setfiles" echo "Do you have policycoreutils installed?" exit 1 -fi +} -CP=`which checkpolicy` -if [ $? -eq 1 ]; then +CP=`which checkpolicy` || { echo "Could not find checkpolicy" echo "Do you have checkpolicy installed?" exit 1 -fi +} VERS=`$CP -V | awk '{print $1}'` -ENABLED=`which selinuxenabled` -if [ $? -eq 1 ]; then +ENABLED=`which selinuxenabled` || { echo "Could not find selinuxenabled" echo "Do you have libselinux-utils installed?" exit 1 -fi +} if selinuxenabled; then echo "SELinux is already enabled" diff --git a/scripts/sign-file.c b/scripts/sign-file.c index 3edb156ae52c..7070245edfc1 100644 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -27,14 +27,17 @@ #include #include #include -#include - -/* - * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API. 
- * - * Remove this if/when that API is no longer used - */ -#pragma GCC diagnostic ignored "-Wdeprecated-declarations" +#if OPENSSL_VERSION_MAJOR >= 3 +# define USE_PKCS11_PROVIDER +# include +# include +#else +# if !defined(OPENSSL_NO_ENGINE) && !defined(OPENSSL_NO_DEPRECATED_3_0) +# define USE_PKCS11_ENGINE +# include +# endif +#endif +#include "ssl-common.h" /* * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to @@ -83,41 +86,6 @@ void format(void) exit(2); } -static void display_openssl_errors(int l) -{ - const char *file; - char buf[120]; - int e, line; - - if (ERR_peek_error() == 0) - return; - fprintf(stderr, "At main.c:%d:\n", l); - - while ((e = ERR_get_error_line(&file, &line))) { - ERR_error_string(e, buf); - fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line); - } -} - -static void drain_openssl_errors(void) -{ - const char *file; - int line; - - if (ERR_peek_error() == 0) - return; - while (ERR_get_error_line(&file, &line)) {} -} - -#define ERR(cond, fmt, ...) \ - do { \ - bool __cond = (cond); \ - display_openssl_errors(__LINE__); \ - if (__cond) { \ - errx(1, fmt, ## __VA_ARGS__); \ - } \ - } while(0) - static const char *key_pass; static int pem_pw_cb(char *buf, int len, int w, void *v) @@ -139,28 +107,64 @@ static int pem_pw_cb(char *buf, int len, int w, void *v) return pwlen; } +static EVP_PKEY *read_private_key_pkcs11(const char *private_key_name) +{ + EVP_PKEY *private_key = NULL; +#ifdef USE_PKCS11_PROVIDER + OSSL_STORE_CTX *store; + + if (!OSSL_PROVIDER_try_load(NULL, "pkcs11", true)) + ERR(1, "OSSL_PROVIDER_try_load(pkcs11)"); + if (!OSSL_PROVIDER_try_load(NULL, "default", true)) + ERR(1, "OSSL_PROVIDER_try_load(default)"); + + store = OSSL_STORE_open(private_key_name, NULL, NULL, NULL, NULL); + ERR(!store, "OSSL_STORE_open"); + + while (!OSSL_STORE_eof(store)) { + OSSL_STORE_INFO *info = OSSL_STORE_load(store); + + if (!info) { + drain_openssl_errors(__LINE__, 0); + continue; + } + if (OSSL_STORE_INFO_get_type(info) == OSSL_STORE_INFO_PKEY) { + private_key = OSSL_STORE_INFO_get1_PKEY(info); + ERR(!private_key, "OSSL_STORE_INFO_get1_PKEY"); + } + OSSL_STORE_INFO_free(info); + if (private_key) + break; + } + OSSL_STORE_close(store); +#elif defined(USE_PKCS11_ENGINE) + ENGINE *e; + + ENGINE_load_builtin_engines(); + drain_openssl_errors(__LINE__, 1); + e = ENGINE_by_id("pkcs11"); + ERR(!e, "Load PKCS#11 ENGINE"); + if (ENGINE_init(e)) + drain_openssl_errors(__LINE__, 1); + else + ERR(1, "ENGINE_init"); + if (key_pass) + ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN"); + private_key = ENGINE_load_private_key(e, private_key_name, NULL, NULL); + ERR(!private_key, "%s", private_key_name); +#else + fprintf(stderr, "no pkcs11 engine/provider available\n"); + exit(1); +#endif + return private_key; +} + static EVP_PKEY *read_private_key(const char *private_key_name) { - EVP_PKEY *private_key; - if (!strncmp(private_key_name, "pkcs11:", 7)) { - ENGINE *e; - - ENGINE_load_builtin_engines(); - drain_openssl_errors(); - e = ENGINE_by_id("pkcs11"); - ERR(!e, "Load PKCS#11 ENGINE"); - if (ENGINE_init(e)) - drain_openssl_errors(); - else - ERR(1, "ENGINE_init"); - if (key_pass) - ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), - "Set PKCS#11 PIN"); - private_key = ENGINE_load_private_key(e, private_key_name, - NULL, NULL); - ERR(!private_key, "%s", private_key_name); + return read_private_key_pkcs11(private_key_name); } else { + EVP_PKEY *private_key; BIO *b; b = BIO_new_file(private_key_name, "rb"); @@ -169,9 +173,9 @@ static 
EVP_PKEY *read_private_key(const char *private_key_name) NULL); ERR(!private_key, "%s", private_key_name); BIO_free(b); - } - return private_key; + return private_key; + } } static X509 *read_x509(const char *x509_name) @@ -306,7 +310,7 @@ int main(int argc, char **argv) /* Digest the module data. */ OpenSSL_add_all_digests(); - display_openssl_errors(__LINE__); + drain_openssl_errors(__LINE__, 0); digest_algo = EVP_get_digestbyname(hash_algo); ERR(!digest_algo, "EVP_get_digestbyname"); diff --git a/scripts/sorttable.h b/scripts/sorttable.h index deb7c1d3e979..f0ba2bf5a886 100644 --- a/scripts/sorttable.h +++ b/scripts/sorttable.h @@ -110,7 +110,7 @@ static inline unsigned long orc_ip(const int *ip) static int orc_sort_cmp(const void *_a, const void *_b) { - struct orc_entry *orc_a; + struct orc_entry *orc_a, *orc_b; const int *a = g_orc_ip_table + *(int *)_a; const int *b = g_orc_ip_table + *(int *)_b; unsigned long a_val = orc_ip(a); @@ -128,6 +128,10 @@ static int orc_sort_cmp(const void *_a, const void *_b) * whitelisted .o files which didn't get objtool generation. */ orc_a = g_orc_table + (a - g_orc_ip_table); + orc_b = g_orc_table + (b - g_orc_ip_table); + if (orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end && + orc_b->sp_reg == ORC_REG_UNDEFINED && !orc_b->end) + return 0; return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1; } diff --git a/scripts/ssl-common.h b/scripts/ssl-common.h new file mode 100644 index 000000000000..2db0e181143c --- /dev/null +++ b/scripts/ssl-common.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: LGPL-2.1+ */ +/* + * SSL helper functions shared by sign-file and extract-cert. + */ + +static void drain_openssl_errors(int l, int silent) +{ + const char *file; + char buf[120]; + int e, line; + + if (ERR_peek_error() == 0) + return; + if (!silent) + fprintf(stderr, "At main.c:%d:\n", l); + + while ((e = ERR_peek_error_line(&file, &line))) { + ERR_error_string(e, buf); + if (!silent) + fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line); + ERR_get_error(); + } +} + +#define ERR(cond, fmt, ...) 
\ + do { \ + bool __cond = (cond); \ + drain_openssl_errors(__LINE__, 0); \ + if (__cond) { \ + errx(1, fmt, ## __VA_ARGS__); \ + } \ + } while (0) diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c index deccea8654ad..1b13fd89d5a9 100644 --- a/security/apparmor/capability.c +++ b/security/apparmor/capability.c @@ -94,6 +94,8 @@ static int audit_caps(struct common_audit_data *sa, struct aa_profile *profile, return error; } else { aa_put_profile(ent->profile); + if (profile != ent->profile) + cap_clear(ent->caps); ent->profile = aa_get_profile(profile); cap_raise(ent->caps, cap); } diff --git a/security/apparmor/policy_unpack_test.c b/security/apparmor/policy_unpack_test.c index 0711a0305df3..d5cdc3cb0755 100644 --- a/security/apparmor/policy_unpack_test.c +++ b/security/apparmor/policy_unpack_test.c @@ -286,6 +286,8 @@ static void policy_unpack_test_unpack_strdup_with_null_name(struct kunit *test) ((uintptr_t)puf->e->start <= (uintptr_t)string) && ((uintptr_t)string <= (uintptr_t)puf->e->end)); KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA); + + kfree(string); } static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test) @@ -301,6 +303,8 @@ static void policy_unpack_test_unpack_strdup_with_name(struct kunit *test) ((uintptr_t)puf->e->start <= (uintptr_t)string) && ((uintptr_t)string <= (uintptr_t)puf->e->end)); KUNIT_EXPECT_STREQ(test, string, TEST_STRING_DATA); + + kfree(string); } static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test) @@ -318,6 +322,8 @@ static void policy_unpack_test_unpack_strdup_out_of_bounds(struct kunit *test) KUNIT_EXPECT_EQ(test, size, 0); KUNIT_EXPECT_NULL(test, string); KUNIT_EXPECT_PTR_EQ(test, puf->e->pos, start); + + kfree(string); } static void policy_unpack_test_unpack_nameX_with_null_name(struct kunit *test) diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c index 41ec31debe87..bf5267bcd7fe 100644 --- a/security/integrity/ima/ima_template_lib.c +++ b/security/integrity/ima/ima_template_lib.c @@ -318,15 +318,21 @@ static int ima_eventdigest_init_common(const u8 *digest, u32 digestsize, hash_algo_name[hash_algo]); } - if (digest) + if (digest) { memcpy(buffer + offset, digest, digestsize); - else + } else { /* * If digest is NULL, the event being recorded is a violation. * Make room for the digest by increasing the offset by the - * hash algorithm digest size. + * hash algorithm digest size. 
If the hash algorithm is not + * specified increase the offset by IMA_DIGEST_SIZE which + * fits SHA1 or MD5 */ - offset += hash_digest_size[hash_algo]; + if (hash_algo < HASH_ALGO__LAST) + offset += hash_digest_size[hash_algo]; + else + offset += IMA_DIGEST_SIZE; + } return ima_write_template_field_data(buffer, offset + digestsize, fmt, field_data); diff --git a/security/landlock/errata.h b/security/landlock/errata.h new file mode 100644 index 000000000000..fe91ef0e6f72 --- /dev/null +++ b/security/landlock/errata.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Landlock - Errata information + * + * Copyright © 2025 Microsoft Corporation + */ + +#ifndef _SECURITY_LANDLOCK_ERRATA_H +#define _SECURITY_LANDLOCK_ERRATA_H + +#include + +struct landlock_erratum { + const int abi; + const u8 number; +}; + +/* clang-format off */ +#define LANDLOCK_ERRATUM(NUMBER) \ + { \ + .abi = LANDLOCK_ERRATA_ABI, \ + .number = NUMBER, \ + }, +/* clang-format on */ + +/* + * Some fixes may require user space to check if they are applied on the running + * kernel before using a specific feature. For instance, this applies when a + * restriction was previously too restrictive and is now getting relaxed (for + * compatibility or semantic reasons). However, non-visible changes for + * legitimate use (e.g. security fixes) do not require an erratum. + */ +static const struct landlock_erratum landlock_errata_init[] __initconst = { + +/* + * Only Sparse may not implement __has_include. If a compiler does not + * implement __has_include, a warning will be printed at boot time (see + * setup.c). + */ +#ifdef __has_include + +#define LANDLOCK_ERRATA_ABI 1 +#if __has_include("errata/abi-1.h") +#include "errata/abi-1.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 2 +#if __has_include("errata/abi-2.h") +#include "errata/abi-2.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 3 +#if __has_include("errata/abi-3.h") +#include "errata/abi-3.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +#define LANDLOCK_ERRATA_ABI 4 +#if __has_include("errata/abi-4.h") +#include "errata/abi-4.h" +#endif +#undef LANDLOCK_ERRATA_ABI + +/* + * For each new erratum, we need to include all the ABI files up to the impacted + * ABI to make all potential future intermediate errata easy to backport. + * + * If such change involves more than one ABI addition, then it must be in a + * dedicated commit with the same Fixes tag as used for the actual fix. + * + * Each commit creating a new security/landlock/errata/abi-*.h file must have a + * Depends-on tag to reference the commit that previously added the line to + * include this new file, except if the original Fixes tag is enough. + * + * Each erratum must be documented in its related ABI file, and a dedicated + * commit must update Documentation/userspace-api/landlock.rst to include this + * erratum. This commit will not be backported. + */ + +#endif + + {} +}; + +#endif /* _SECURITY_LANDLOCK_ERRATA_H */ diff --git a/security/landlock/fs.c b/security/landlock/fs.c index 7b0e5976113c..7b95afcc6b43 100644 --- a/security/landlock/fs.c +++ b/security/landlock/fs.c @@ -669,10 +669,6 @@ static inline access_mask_t get_mode_access(const umode_t mode) switch (mode & S_IFMT) { case S_IFLNK: return LANDLOCK_ACCESS_FS_MAKE_SYM; - case 0: - /* A zero mode translates to S_IFREG. 
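The fs.c hunk in progress here relaxes get_mode_access(): instead of WARNing and returning no access right for an unexpected file type, anything that is not a directory, device node, FIFO, socket or symlink is now treated as a regular file. A userspace restatement of the new mapping, with the enum values and function name invented while the behaviour mirrors the patch:

    #include <stdio.h>
    #include <sys/stat.h>

    enum access_model {
        ACC_MAKE_REG, ACC_MAKE_DIR, ACC_MAKE_CHAR, ACC_MAKE_BLOCK,
        ACC_MAKE_FIFO, ACC_MAKE_SOCK, ACC_MAKE_SYM,
    };

    static enum access_model mode_to_access(unsigned int mode)
    {
        switch (mode & S_IFMT) {
        case S_IFLNK:  return ACC_MAKE_SYM;
        case S_IFDIR:  return ACC_MAKE_DIR;
        case S_IFCHR:  return ACC_MAKE_CHAR;
        case S_IFBLK:  return ACC_MAKE_BLOCK;
        case S_IFIFO:  return ACC_MAKE_FIFO;
        case S_IFSOCK: return ACC_MAKE_SOCK;
        case S_IFREG:
        case 0:        /* a zero mode means S_IFREG */
        default:       /* weird or future file types fall back to "regular" */
            return ACC_MAKE_REG;
        }
    }

    int main(void)
    {
        printf("S_IFREG -> %d\n", mode_to_access(S_IFREG));
        printf("unknown -> %d\n", mode_to_access(0xf000)); /* no standard type */
        return 0;
    }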
*/ - case S_IFREG: - return LANDLOCK_ACCESS_FS_MAKE_REG; case S_IFDIR: return LANDLOCK_ACCESS_FS_MAKE_DIR; case S_IFCHR: @@ -683,9 +679,12 @@ static inline access_mask_t get_mode_access(const umode_t mode) return LANDLOCK_ACCESS_FS_MAKE_FIFO; case S_IFSOCK: return LANDLOCK_ACCESS_FS_MAKE_SOCK; + case S_IFREG: + case 0: + /* A zero mode translates to S_IFREG. */ default: - WARN_ON_ONCE(1); - return 0; + /* Treats weird files as regular files. */ + return LANDLOCK_ACCESS_FS_MAKE_REG; } } diff --git a/security/landlock/setup.c b/security/landlock/setup.c index f8e8e980454c..cdee579901da 100644 --- a/security/landlock/setup.c +++ b/security/landlock/setup.c @@ -6,11 +6,13 @@ * Copyright © 2018-2020 ANSSI */ +#include #include #include #include "common.h" #include "cred.h" +#include "errata.h" #include "fs.h" #include "ptrace.h" #include "setup.h" @@ -23,8 +25,36 @@ struct lsm_blob_sizes landlock_blob_sizes __lsm_ro_after_init = { .lbs_superblock = sizeof(struct landlock_superblock_security), }; +int landlock_errata __ro_after_init; + +static void __init compute_errata(void) +{ + size_t i; + +#ifndef __has_include + /* + * This is a safeguard to make sure the compiler implements + * __has_include (see errata.h). + */ + WARN_ON_ONCE(1); + return; +#endif + + for (i = 0; landlock_errata_init[i].number; i++) { + const int prev_errata = landlock_errata; + + if (WARN_ON_ONCE(landlock_errata_init[i].abi > + landlock_abi_version)) + continue; + + landlock_errata |= BIT(landlock_errata_init[i].number - 1); + WARN_ON_ONCE(prev_errata == landlock_errata); + } +} + static int __init landlock_init(void) { + compute_errata(); landlock_add_cred_hooks(); landlock_add_ptrace_hooks(); landlock_add_fs_hooks(); diff --git a/security/landlock/setup.h b/security/landlock/setup.h index 1daffab1ab4b..420dceca35d2 100644 --- a/security/landlock/setup.h +++ b/security/landlock/setup.h @@ -11,7 +11,10 @@ #include +extern const int landlock_abi_version; + extern bool landlock_initialized; +extern int landlock_errata; extern struct lsm_blob_sizes landlock_blob_sizes; diff --git a/security/landlock/syscalls.c b/security/landlock/syscalls.c index d0cb3d0cbf98..7590a74a55e5 100644 --- a/security/landlock/syscalls.c +++ b/security/landlock/syscalls.c @@ -150,7 +150,9 @@ static const struct file_operations ruleset_fops = { * the new ruleset. * @size: Size of the pointed &struct landlock_ruleset_attr (needed for * backward and forward compatibility). - * @flags: Supported value: %LANDLOCK_CREATE_RULESET_VERSION. + * @flags: Supported value: + * - %LANDLOCK_CREATE_RULESET_VERSION + * - %LANDLOCK_CREATE_RULESET_ERRATA * * This system call enables to create a new Landlock ruleset, and returns the * related file descriptor on success. @@ -159,6 +161,10 @@ static const struct file_operations ruleset_fops = { * 0, then the returned value is the highest supported Landlock ABI version * (starting at 1). * + * If @flags is %LANDLOCK_CREATE_RULESET_ERRATA and @attr is NULL and @size is + * 0, then the returned value is a bitmask of fixed issues for the current + * Landlock ABI version. 
+ * * Possible returned errors are: * * - %EOPNOTSUPP: Landlock is supported by the kernel but disabled at boot time; @@ -181,9 +187,15 @@ SYSCALL_DEFINE3(landlock_create_ruleset, return -EOPNOTSUPP; if (flags) { - if ((flags == LANDLOCK_CREATE_RULESET_VERSION) && !attr && - !size) - return LANDLOCK_ABI_VERSION; + if (attr || size) + return -EINVAL; + + if (flags == LANDLOCK_CREATE_RULESET_VERSION) + return landlock_abi_version; + + if (flags == LANDLOCK_CREATE_RULESET_ERRATA) + return landlock_errata; + return -EINVAL; } @@ -213,6 +225,8 @@ SYSCALL_DEFINE3(landlock_create_ruleset, return ruleset_fd; } +const int landlock_abi_version = LANDLOCK_ABI_VERSION; + /* * Returns an owned ruleset from a FD. It is thus needed to call * landlock_put_ruleset() on the return value. diff --git a/security/safesetid/securityfs.c b/security/safesetid/securityfs.c index 25310468bcdd..8e1ffd70b18a 100644 --- a/security/safesetid/securityfs.c +++ b/security/safesetid/securityfs.c @@ -143,6 +143,9 @@ static ssize_t handle_policy_update(struct file *file, char *buf, *p, *end; int err; + if (len >= KMALLOC_MAX_SIZE) + return -EINVAL; + pol = kmalloc(sizeof(struct setid_ruleset), GFP_KERNEL); if (!pol) return -ENOMEM; diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 2b8ebd390e37..25599cd5d6ae 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -969,7 +969,10 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd, xpermd->driver)) return; } else { - BUG(); + pr_warn_once( + "SELinux: unknown extended permission (%u) will be ignored\n", + node->datum.u.xperms->specified); + return; } if (node->key.specified == AVTAB_XPERMS_ALLOWED) { @@ -1006,7 +1009,8 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd, node->datum.u.xperms->perms.p[i]; } } else { - BUG(); + pr_warn_once("SELinux: unknown specified key (%u)\n", + node->key.specified); } } diff --git a/security/smack/smack.h b/security/smack/smack.h index aa15ff56ed6e..eb22090f3bb8 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -152,6 +152,7 @@ struct smk_net4addr { struct smack_known *smk_label; /* label */ }; +#if IS_ENABLED(CONFIG_IPV6) /* * An entry in the table identifying IPv6 hosts. */ @@ -162,7 +163,9 @@ struct smk_net6addr { int smk_masks; /* mask size */ struct smack_known *smk_label; /* label */ }; +#endif /* CONFIG_IPV6 */ +#ifdef SMACK_IPV6_PORT_LABELING /* * An entry in the table identifying ports. 
*/ @@ -175,6 +178,7 @@ struct smk_port_label { short smk_sock_type; /* Socket type */ short smk_can_reuse; }; +#endif /* SMACK_IPV6_PORT_LABELING */ struct smack_known_list_elem { struct list_head list; @@ -314,7 +318,9 @@ extern struct smack_known smack_known_web; extern struct mutex smack_known_lock; extern struct list_head smack_known_list; extern struct list_head smk_net4addr_list; +#if IS_ENABLED(CONFIG_IPV6) extern struct list_head smk_net6addr_list; +#endif /* CONFIG_IPV6 */ extern struct mutex smack_onlycap_lock; extern struct list_head smack_onlycap_list; diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 25995df15e82..285103ffc75c 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2405,6 +2405,7 @@ static struct smack_known *smack_ipv4host_label(struct sockaddr_in *sip) return NULL; } +#if IS_ENABLED(CONFIG_IPV6) /* * smk_ipv6_localhost - Check for local ipv6 host address * @sip: the address @@ -2472,6 +2473,7 @@ static struct smack_known *smack_ipv6host_label(struct sockaddr_in6 *sip) return NULL; } +#endif /* CONFIG_IPV6 */ /** * smack_netlbl_add - Set the secattr on a socket @@ -2575,6 +2577,7 @@ static int smk_ipv4_check(struct sock *sk, struct sockaddr_in *sap) return rc; } +#if IS_ENABLED(CONFIG_IPV6) /** * smk_ipv6_check - check Smack access * @subject: subject Smack label @@ -2607,6 +2610,7 @@ static int smk_ipv6_check(struct smack_known *subject, rc = smk_bu_note("IPv6 check", subject, object, MAY_WRITE, rc); return rc; } +#endif /* CONFIG_IPV6 */ #ifdef SMACK_IPV6_PORT_LABELING /** @@ -2939,7 +2943,9 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, return 0; if (addrlen < offsetofend(struct sockaddr, sa_family)) return 0; - if (IS_ENABLED(CONFIG_IPV6) && sap->sa_family == AF_INET6) { + +#if IS_ENABLED(CONFIG_IPV6) + if (sap->sa_family == AF_INET6) { struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; struct smack_known *rsp = NULL; @@ -2959,6 +2965,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, return rc; } +#endif /* CONFIG_IPV6 */ + if (sap->sa_family != AF_INET || addrlen < sizeof(struct sockaddr_in)) return 0; rc = smk_ipv4_check(sock->sk, (struct sockaddr_in *)sap); diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index d955f3dcb3a5..9dca3672d82b 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -922,6 +922,10 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf, if (rc >= 0) { old_cat = skp->smk_netlabel.attr.mls.cat; rcu_assign_pointer(skp->smk_netlabel.attr.mls.cat, ncats.attr.mls.cat); + if (ncats.attr.mls.cat) + skp->smk_netlabel.flags |= NETLBL_SECATTR_MLS_CAT; + else + skp->smk_netlabel.flags &= ~(u32)NETLBL_SECATTR_MLS_CAT; skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl; synchronize_rcu(); netlbl_catmap_free(old_cat); diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index a7af085550b2..5f1cdd0af115 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -2664,7 +2664,7 @@ ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, if (head->w.avail >= head->writebuf_size - 1) { const int len = head->writebuf_size * 2; - char *cp = kzalloc(len, GFP_NOFS); + char *cp = kzalloc(len, GFP_NOFS | __GFP_NOWARN); if (!cp) { error = -ENOMEM; diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index ac2efeb63a39..2b9fada97532 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -1085,8 +1085,7 @@ static int 
snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) runtime->oss.params = 0; runtime->oss.prepare = 1; runtime->oss.buffer_used = 0; - if (runtime->dma_area) - snd_pcm_format_set_silence(runtime->format, runtime->dma_area, bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_runtime_buffer_set_silence(runtime); runtime->oss.period_frames = snd_pcm_alsa_frames(substream, oss_period_size); diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 2b73518e5e31..bf752b188b05 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -703,6 +703,17 @@ static void snd_pcm_buffer_access_unlock(struct snd_pcm_runtime *runtime) atomic_inc(&runtime->buffer_accessing); } +/* fill the PCM buffer with the current silence format; called from pcm_oss.c */ +void snd_pcm_runtime_buffer_set_silence(struct snd_pcm_runtime *runtime) +{ + snd_pcm_buffer_access_lock(runtime); + if (runtime->dma_area) + snd_pcm_format_set_silence(runtime->format, runtime->dma_area, + bytes_to_samples(runtime, runtime->dma_bytes)); + snd_pcm_buffer_access_unlock(runtime); +} +EXPORT_SYMBOL_GPL(snd_pcm_runtime_buffer_set_silence); + #if IS_ENABLED(CONFIG_SND_PCM_OSS) #define is_oss_stream(substream) ((substream)->oss.oss) #else @@ -3785,9 +3796,11 @@ static vm_fault_t snd_pcm_mmap_data_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; if (substream->ops->page) page = substream->ops->page(substream, offset); - else if (!snd_pcm_get_dma_buf(substream)) + else if (!snd_pcm_get_dma_buf(substream)) { + if (WARN_ON_ONCE(!runtime->dma_area)) + return VM_FAULT_SIGBUS; page = virt_to_page(runtime->dma_area + offset); - else + } else page = snd_sgbuf_get_page(snd_pcm_get_dma_buf(substream), offset); if (!page) return VM_FAULT_SIGBUS; diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c index e3394919daa0..51ee4c00a843 100644 --- a/sound/core/seq/oss/seq_oss_synth.c +++ b/sound/core/seq/oss/seq_oss_synth.c @@ -66,6 +66,7 @@ static struct seq_oss_synth midi_synth_dev = { }; static DEFINE_SPINLOCK(register_lock); +static DEFINE_MUTEX(sysex_mutex); /* * prototypes @@ -497,6 +498,7 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf, if (!info) return -ENXIO; + guard(mutex)(&sysex_mutex); sysex = info->sysex; if (sysex == NULL) { sysex = kzalloc(sizeof(*sysex), GFP_KERNEL); diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 2d707afa1ef1..1252ea7ad55e 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -1140,8 +1140,7 @@ static __poll_t snd_seq_poll(struct file *file, poll_table * wait) if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) { /* check if data is available in the pool */ - if (!snd_seq_write_pool_allocated(client) || - snd_seq_pool_poll_wait(client->pool, file, wait)) + if (snd_seq_pool_poll_wait(client->pool, file, wait)) mask |= EPOLLOUT | EPOLLWRNORM; } @@ -2382,8 +2381,6 @@ int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table if (client == NULL) return -ENXIO; - if (! 
snd_seq_write_pool_allocated(client)) - return 1; if (snd_seq_pool_poll_wait(client->pool, file, wait)) return 1; return 0; diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index 47ef6bc30c0e..e30b92d85079 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c @@ -366,6 +366,7 @@ int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file, poll_table *wait) { poll_wait(file, &pool->output_sleep, wait); + guard(spinlock_irq)(&pool->lock); return snd_seq_output_ok(pool); } diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index 5ada28b5515c..b02c45e939e7 100644 --- a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -675,6 +675,10 @@ static const struct config_entry acpi_config_table[] = { #if IS_ENABLED(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI) || \ IS_ENABLED(CONFIG_SND_SOC_SOF_BAYTRAIL) /* BayTrail */ + { + .flags = FLAG_SST_OR_SOF_BYT, + .acpi_hid = "LPE0F28", + }, { .flags = FLAG_SST_OR_SOF_BYT, .acpi_hid = "80860F28", diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c index 4a7e20bb11bc..802c5ed4a5ba 100644 --- a/sound/pci/es1968.c +++ b/sound/pci/es1968.c @@ -1569,7 +1569,7 @@ static int snd_es1968_capture_open(struct snd_pcm_substream *substream) struct snd_pcm_runtime *runtime = substream->runtime; struct es1968 *chip = snd_pcm_substream_chip(substream); struct esschan *es; - int apu1, apu2; + int err, apu1, apu2; apu1 = snd_es1968_alloc_apu_pair(chip, ESM_APU_PCM_CAPTURE); if (apu1 < 0) @@ -1613,7 +1613,9 @@ static int snd_es1968_capture_open(struct snd_pcm_substream *substream) runtime->hw = snd_es1968_capture; runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max = calc_available_memory_size(chip) - 1024; /* keep MIXBUF size */ - snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES); + err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES); + if (err < 0) + return err; spin_lock_irq(&chip->substream_lock); list_add(&es->list, &chip->substream_list); diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig index d29d8372a3c0..fa083b01aed5 100644 --- a/sound/pci/hda/Kconfig +++ b/sound/pci/hda/Kconfig @@ -132,6 +132,7 @@ comment "Set to Y if you want auto-loading the side codec driver" config SND_HDA_CODEC_REALTEK tristate "Build Realtek HD-audio codec support" + depends on INPUT select SND_HDA_GENERIC select SND_HDA_GENERIC_LEDS help diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c index 7c6b1fe8dfcc..58b2e25c448e 100644 --- a/sound/pci/hda/hda_auto_parser.c +++ b/sound/pci/hda/hda_auto_parser.c @@ -80,7 +80,11 @@ static int compare_input_type(const void *ap, const void *bp) /* In case one has boost and the other one has not, pick the one with boost first. 
*/ - return (int)(b->has_boost_on_pin - a->has_boost_on_pin); + if (a->has_boost_on_pin != b->has_boost_on_pin) + return (int)(b->has_boost_on_pin - a->has_boost_on_pin); + + /* Keep the original order */ + return a->order - b->order; } /* Reorder the surround channels @@ -400,6 +404,8 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec, reorder_outputs(cfg->speaker_outs, cfg->speaker_pins); /* sort inputs in the order of AUTO_PIN_* type */ + for (i = 0; i < cfg->num_inputs; i++) + cfg->inputs[i].order = i; sort(cfg->inputs, cfg->num_inputs, sizeof(cfg->inputs[0]), compare_input_type, NULL); diff --git a/sound/pci/hda/hda_auto_parser.h b/sound/pci/hda/hda_auto_parser.h index df63d66af1ab..8bb8202cf284 100644 --- a/sound/pci/hda/hda_auto_parser.h +++ b/sound/pci/hda/hda_auto_parser.h @@ -35,6 +35,7 @@ struct auto_pin_cfg_item { unsigned int is_headset_mic:1; unsigned int is_headphone_mic:1; /* Mic-only in headphone jack */ unsigned int has_boost_on_pin:1; + int order; }; struct auto_pin_cfg; diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c index e63621bcb214..1a684e47d4d1 100644 --- a/sound/pci/hda/hda_beep.c +++ b/sound/pci/hda/hda_beep.c @@ -31,8 +31,9 @@ static void generate_tone(struct hda_beep *beep, int tone) beep->power_hook(beep, true); beep->playing = 1; } - snd_hda_codec_write(codec, beep->nid, 0, - AC_VERB_SET_BEEP_CONTROL, tone); + if (!codec->beep_just_power_on) + snd_hda_codec_write(codec, beep->nid, 0, + AC_VERB_SET_BEEP_CONTROL, tone); if (!tone && beep->playing) { beep->playing = 0; if (beep->power_hook) @@ -212,10 +213,12 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid) struct hda_beep *beep; int err; - if (!snd_hda_get_bool_hint(codec, "beep")) - return 0; /* disabled explicitly by hints */ - if (codec->beep_mode == HDA_BEEP_MODE_OFF) - return 0; /* disabled by module option */ + if (!codec->beep_just_power_on) { + if (!snd_hda_get_bool_hint(codec, "beep")) + return 0; /* disabled explicitly by hints */ + if (codec->beep_mode == HDA_BEEP_MODE_OFF) + return 0; /* disabled by module option */ + } beep = kzalloc(sizeof(*beep), GFP_KERNEL); if (beep == NULL) diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index bd19f92aeeec..9d7d99b584fe 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2465,7 +2465,9 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec, break; id = kctl->id; id.index = spdif_index; - snd_ctl_rename_id(codec->card, &kctl->id, &id); + err = snd_ctl_rename_id(codec->card, &kctl->id, &id); + if (err < 0) + return err; } bus->primary_dig_out_type = HDA_PCM_TYPE_HDMI; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index b8d769b2d0f9..5eeea9a4d201 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -37,6 +37,7 @@ #include #include #include +#include #ifdef CONFIG_X86 /* for snoop control */ @@ -1366,8 +1367,21 @@ static void azx_free(struct azx *chip) if (use_vga_switcheroo(hda)) { if (chip->disabled && hda->probe_continued) snd_hda_unlock_devices(&chip->bus); - if (hda->vga_switcheroo_registered) + if (hda->vga_switcheroo_registered) { vga_switcheroo_unregister_client(chip->pci); + + /* Some GPUs don't have sound, and azx_first_init fails, + * leaving the device probed but non-functional. As long + * as it's probed, the PCI subsystem keeps its runtime + * PM status as active. Force it to suspended (as we + * actually stop the chip) to allow GPU to suspend via + * vga_switcheroo, and print a warning. 
+ */ + dev_warn(&pci->dev, "GPU sound probed, but not operational: please add a quirk to driver_denylist\n"); + pm_runtime_disable(&pci->dev); + pm_runtime_set_suspended(&pci->dev); + pm_runtime_enable(&pci->dev); + } } if (bus->chip_init) { @@ -2074,6 +2088,27 @@ static const struct pci_device_id driver_denylist[] = { {} }; +static struct pci_device_id driver_denylist_ideapad_z570[] = { + { PCI_DEVICE_SUB(0x10de, 0x0bea, 0x0000, 0x0000) }, /* NVIDIA GF108 HDA */ + {} +}; + +/* DMI-based denylist, to be used when: + * - PCI subsystem IDs are zero, impossible to distinguish from valid sound cards. + * - Different modifications of the same laptop use different GPU models. + */ +static const struct dmi_system_id driver_denylist_dmi[] = { + { + /* No HDA in NVIDIA DGPU. BIOS disables it, but quirk_nvidia_hda() reenables. */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"), + }, + .driver_data = &driver_denylist_ideapad_z570, + }, + {} +}; + static const struct hda_controller_ops pci_hda_ops = { .disable_msi_reset_irq = disable_msi_reset_irq, .position_check = azx_position_check, @@ -2084,6 +2119,7 @@ static DECLARE_BITMAP(probed_devs, SNDRV_CARDS); static int azx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { + const struct dmi_system_id *dmi; struct snd_card *card; struct hda_intel *hda; struct azx *chip; @@ -2096,6 +2132,12 @@ static int azx_probe(struct pci_dev *pci, return -ENODEV; } + dmi = dmi_first_match(driver_denylist_dmi); + if (dmi && pci_match_id(dmi->driver_data, pci)) { + dev_info(&pci->dev, "Skipping the device on the DMI denylist\n"); + return -ENODEV; + } + dev = find_first_zero_bit(probed_devs, SNDRV_CARDS); if (dev >= SNDRV_CARDS) return -ENODEV; @@ -2217,6 +2259,8 @@ static const struct snd_pci_quirk power_save_denylist[] = { SND_PCI_QUIRK(0x1631, 0xe017, "Packard Bell NEC IMEDIA 5204", 0), /* KONTRON SinglePC may cause a stall at runtime resume */ SND_PCI_QUIRK(0x1734, 0x1232, "KONTRON SinglePC", 0), + /* Dell ALC3271 */ + SND_PCI_QUIRK(0x1028, 0x0962, "Dell ALC3271", 0), {} }; #endif /* CONFIG_PM */ diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index a14b9cb48f69..a3d68b83ebd5 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -311,6 +311,7 @@ enum { CXT_FIXUP_HP_MIC_NO_PRESENCE, CXT_PINCFG_SWS_JS201D, CXT_PINCFG_TOP_SPEAKER, + CXT_FIXUP_HP_A_U, }; /* for hda_fixup_thinkpad_acpi() */ @@ -778,6 +779,18 @@ static void cxt_setup_mute_led(struct hda_codec *codec, } } +static void cxt_setup_gpio_unmute(struct hda_codec *codec, + unsigned int gpio_mute_mask) +{ + if (gpio_mute_mask) { + // set gpio data to 0. + snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 0); + snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_MASK, gpio_mute_mask); + snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DIRECTION, gpio_mute_mask); + snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_STICKY_MASK, 0); + } +} + static void cxt_fixup_mute_led_gpio(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -792,6 +805,15 @@ static void cxt_fixup_hp_zbook_mute_led(struct hda_codec *codec, cxt_setup_mute_led(codec, 0x10, 0x20); } +static void cxt_fixup_hp_a_u(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + // Init vers in BIOS mute the spk/hp by set gpio high to avoid pop noise, + // so need to unmute once by clearing the gpio data when runs into the system. 
+ if (action == HDA_FIXUP_ACT_INIT) + cxt_setup_gpio_unmute(codec, 0x2); +} + /* ThinkPad X200 & co with cxt5051 */ static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = { { 0x16, 0x042140ff }, /* HP (seq# overridden) */ @@ -1016,6 +1038,10 @@ static const struct hda_fixup cxt_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = cxt_fixup_sirius_top_speaker, }, + [CXT_FIXUP_HP_A_U] = { + .type = HDA_FIXUP_FUNC, + .v.func = cxt_fixup_hp_a_u, + }, }; static const struct snd_pci_quirk cxt5045_fixups[] = { @@ -1072,6 +1098,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x8231, "HP ProBook 450 G4", CXT_FIXUP_MUTE_LED_GPIO), SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), @@ -1090,6 +1117,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), + SND_PCI_QUIRK(0x14f1, 0x0252, "MBX-Z60MR100", CXT_FIXUP_HP_A_U), SND_PCI_QUIRK(0x14f1, 0x0265, "SWS JS201D", CXT_PINCFG_SWS_JS201D), SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), @@ -1135,6 +1163,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { { .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" }, { .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" }, { .id = CXT_PINCFG_TOP_SPEAKER, .name = "sirius-top-speaker" }, + { .id = CXT_FIXUP_HP_A_U, .name = "HP-U-support" }, {} }; diff --git a/sound/pci/hda/patch_cs8409-tables.c b/sound/pci/hda/patch_cs8409-tables.c index b288874e401e..b2e1856ab891 100644 --- a/sound/pci/hda/patch_cs8409-tables.c +++ b/sound/pci/hda/patch_cs8409-tables.c @@ -121,7 +121,7 @@ static const struct cs8409_i2c_param cs42l42_init_reg_seq[] = { { CS42L42_MIXER_CHA_VOL, 0x3F }, { CS42L42_MIXER_CHB_VOL, 0x3F }, { CS42L42_MIXER_ADC_VOL, 0x3f }, - { CS42L42_HP_CTL, 0x03 }, + { CS42L42_HP_CTL, 0x0D }, { CS42L42_MIC_DET_CTL1, 0xB6 }, { CS42L42_TIPSENSE_CTL, 0xC2 }, { CS42L42_HS_CLAMP_DISABLE, 0x01 }, @@ -315,7 +315,7 @@ static const struct cs8409_i2c_param dolphin_c0_init_reg_seq[] = { { CS42L42_ASP_TX_SZ_EN, 0x01 }, { CS42L42_PWR_CTL1, 0x0A }, { CS42L42_PWR_CTL2, 0x84 }, - { CS42L42_HP_CTL, 0x03 }, + { CS42L42_HP_CTL, 0x0D }, { CS42L42_MIXER_CHA_VOL, 0x3F }, { CS42L42_MIXER_CHB_VOL, 0x3F }, { CS42L42_MIXER_ADC_VOL, 0x3f }, @@ -371,7 +371,7 @@ static const struct cs8409_i2c_param dolphin_c1_init_reg_seq[] = { { CS42L42_ASP_TX_SZ_EN, 0x00 }, { CS42L42_PWR_CTL1, 0x0E }, { CS42L42_PWR_CTL2, 0x84 }, - { CS42L42_HP_CTL, 0x01 }, + { CS42L42_HP_CTL, 0x0D }, { CS42L42_MIXER_CHA_VOL, 0x3F }, { CS42L42_MIXER_CHB_VOL, 0x3F }, { CS42L42_MIXER_ADC_VOL, 0x3f }, diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c index 892223d9e64a..b003ac1990ba 100644 --- a/sound/pci/hda/patch_cs8409.c +++ b/sound/pci/hda/patch_cs8409.c @@ -876,7 +876,7 @@ static void cs42l42_resume(struct sub_codec *cs42l42) { CS42L42_DET_INT_STATUS2, 0x00 }, { CS42L42_TSRS_PLUG_STATUS, 0x00 
}, }; - int fsv_old, fsv_new; + unsigned int fsv; /* Bring CS42L42 out of Reset */ spec->gpio_data = snd_hda_codec_read(codec, CS8409_PIN_AFG, 0, AC_VERB_GET_GPIO_DATA, 0); @@ -893,13 +893,15 @@ static void cs42l42_resume(struct sub_codec *cs42l42) /* Clear interrupts, by reading interrupt status registers */ cs8409_i2c_bulk_read(cs42l42, irq_regs, ARRAY_SIZE(irq_regs)); - fsv_old = cs8409_i2c_read(cs42l42, CS42L42_HP_CTL); - if (cs42l42->full_scale_vol == CS42L42_FULL_SCALE_VOL_0DB) - fsv_new = fsv_old & ~CS42L42_FULL_SCALE_VOL_MASK; - else - fsv_new = fsv_old & CS42L42_FULL_SCALE_VOL_MASK; - if (fsv_new != fsv_old) - cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv_new); + fsv = cs8409_i2c_read(cs42l42, CS42L42_HP_CTL); + if (cs42l42->full_scale_vol) { + // Set the full scale volume bit + fsv |= CS42L42_FULL_SCALE_VOL_MASK; + cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv); + } + // Unmute analog channels A and B + fsv = (fsv & ~CS42L42_ANA_MUTE_AB); + cs8409_i2c_write(cs42l42, CS42L42_HP_CTL, fsv); /* we have to explicitly allow unsol event handling even during the * resume phase so that the jack event is processed properly @@ -921,7 +923,7 @@ static void cs42l42_suspend(struct sub_codec *cs42l42) { CS42L42_MIXER_CHA_VOL, 0x3F }, { CS42L42_MIXER_ADC_VOL, 0x3F }, { CS42L42_MIXER_CHB_VOL, 0x3F }, - { CS42L42_HP_CTL, 0x0F }, + { CS42L42_HP_CTL, 0x0D }, { CS42L42_ASP_RX_DAI0_EN, 0x00 }, { CS42L42_ASP_CLK_CFG, 0x00 }, { CS42L42_PWR_CTL1, 0xFE }, diff --git a/sound/pci/hda/patch_cs8409.h b/sound/pci/hda/patch_cs8409.h index 937e9387abdc..bca81d49f201 100644 --- a/sound/pci/hda/patch_cs8409.h +++ b/sound/pci/hda/patch_cs8409.h @@ -230,9 +230,10 @@ enum cs8409_coefficient_index_registers { #define CS42L42_PDN_TIMEOUT_US (250000) #define CS42L42_PDN_SLEEP_US (2000) #define CS42L42_INIT_TIMEOUT_MS (45) +#define CS42L42_ANA_MUTE_AB (0x0C) #define CS42L42_FULL_SCALE_VOL_MASK (2) -#define CS42L42_FULL_SCALE_VOL_0DB (1) -#define CS42L42_FULL_SCALE_VOL_MINUS6DB (0) +#define CS42L42_FULL_SCALE_VOL_0DB (0) +#define CS42L42_FULL_SCALE_VOL_MINUS6DB (1) /* Dell BULLSEYE / WARLOCK / CYBORG Specific Definitions */ diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 1b5527cb59a9..115c675819e6 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -24,6 +24,7 @@ #include #include "hda_local.h" #include "hda_auto_parser.h" +#include "hda_beep.h" #include "hda_jack.h" #include "hda_generic.h" #include "hda_component.h" @@ -469,6 +470,8 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) break; case 0x10ec0234: case 0x10ec0274: + alc_write_coef_idx(codec, 0x6e, 0x0c25); + fallthrough; case 0x10ec0294: case 0x10ec0700: case 0x10ec0701: @@ -580,6 +583,9 @@ static void alc_shutup_pins(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; + if (spec->no_shutup_pins) + return; + switch (codec->core.vendor_id) { case 0x10ec0236: case 0x10ec0256: @@ -595,8 +601,7 @@ static void alc_shutup_pins(struct hda_codec *codec) alc_headset_mic_no_shutup(codec); break; default: - if (!spec->no_shutup_pins) - snd_hda_shutup_pins(codec); + snd_hda_shutup_pins(codec); break; } } @@ -3599,25 +3604,22 @@ static void alc256_init(struct hda_codec *codec) hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); - if (hp_pin_sense) + if (hp_pin_sense) { msleep(2); + alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ - alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ - - snd_hda_codec_write(codec, hp_pin, 0, - AC_VERB_SET_AMP_GAIN_MUTE, 
AMP_OUT_MUTE); - - if (hp_pin_sense || spec->ultra_low_power) - msleep(85); - - snd_hda_codec_write(codec, hp_pin, 0, + snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); - if (hp_pin_sense || spec->ultra_low_power) - msleep(100); + msleep(75); + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); + + msleep(75); + alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ + } alc_update_coef_idx(codec, 0x46, 3 << 12, 0); - alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 1 << 15); /* Clear bit */ alc_update_coefex_idx(codec, 0x53, 0x02, 0x8000, 0 << 15); /* @@ -3641,29 +3643,28 @@ static void alc256_shutup(struct hda_codec *codec) alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ hp_pin_sense = snd_hda_jack_detect(codec, hp_pin); - if (hp_pin_sense) + if (hp_pin_sense) { msleep(2); - snd_hda_codec_write(codec, hp_pin, 0, + snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); - if (hp_pin_sense || spec->ultra_low_power) - msleep(85); + msleep(75); /* 3k pull low control for Headset jack. */ /* NOTE: call this before clearing the pin, otherwise codec stalls */ /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly * when booting with headset plugged. So skip setting it for the codec alc257 */ - if (spec->en_3kpull_low) - alc_update_coef_idx(codec, 0x46, 0, 3 << 12); + if (spec->en_3kpull_low) + alc_update_coef_idx(codec, 0x46, 0, 3 << 12); - if (!spec->no_shutup_pins) - snd_hda_codec_write(codec, hp_pin, 0, + if (!spec->no_shutup_pins) + snd_hda_codec_write(codec, hp_pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); - if (hp_pin_sense || spec->ultra_low_power) - msleep(100); + msleep(75); + } alc_auto_setup_eapd(codec, false); alc_shutup_pins(codec); @@ -3758,33 +3759,29 @@ static void alc225_init(struct hda_codec *codec) hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin); hp2_pin_sense = snd_hda_jack_detect(codec, 0x16); - if (hp1_pin_sense || hp2_pin_sense) + if (hp1_pin_sense || hp2_pin_sense) { msleep(2); + alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ - alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */ + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x16, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); + msleep(75); - if (hp1_pin_sense || spec->ultra_low_power) - snd_hda_codec_write(codec, hp_pin, 0, - AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); - if (hp2_pin_sense) - snd_hda_codec_write(codec, 0x16, 0, - AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x16, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); - if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power) - msleep(85); - - if (hp1_pin_sense || spec->ultra_low_power) - snd_hda_codec_write(codec, hp_pin, 0, - AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); - if (hp2_pin_sense) - snd_hda_codec_write(codec, 0x16, 0, - AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); - - if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power) - msleep(100); - - alc_update_coef_idx(codec, 0x4a, 3 << 10, 0); - alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ + msleep(75); + alc_update_coef_idx(codec, 0x4a, 3 << 10, 
0); + alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x4); /* Hight power */ + } } static void alc225_shutup(struct hda_codec *codec) @@ -3796,36 +3793,35 @@ static void alc225_shutup(struct hda_codec *codec) if (!hp_pin) hp_pin = 0x21; - alc_disable_headset_jack_key(codec); - /* 3k pull low control for Headset jack. */ - alc_update_coef_idx(codec, 0x4a, 0, 3 << 10); - hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin); hp2_pin_sense = snd_hda_jack_detect(codec, 0x16); - if (hp1_pin_sense || hp2_pin_sense) + if (hp1_pin_sense || hp2_pin_sense) { + alc_disable_headset_jack_key(codec); + /* 3k pull low control for Headset jack. */ + alc_update_coef_idx(codec, 0x4a, 0, 3 << 10); msleep(2); - if (hp1_pin_sense || spec->ultra_low_power) - snd_hda_codec_write(codec, hp_pin, 0, - AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); - if (hp2_pin_sense) - snd_hda_codec_write(codec, 0x16, 0, - AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x16, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); - if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power) - msleep(85); + msleep(75); - if (hp1_pin_sense || spec->ultra_low_power) - snd_hda_codec_write(codec, hp_pin, 0, - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); - if (hp2_pin_sense) - snd_hda_codec_write(codec, 0x16, 0, - AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); - - if (hp1_pin_sense || hp2_pin_sense || spec->ultra_low_power) - msleep(100); + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x16, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); + msleep(75); + alc_update_coef_idx(codec, 0x4a, 3 << 10, 0); + alc_enable_headset_jack_key(codec); + } alc_auto_setup_eapd(codec, false); alc_shutup_pins(codec); if (spec->ultra_low_power) { @@ -3836,9 +3832,79 @@ static void alc225_shutup(struct hda_codec *codec) alc_update_coef_idx(codec, 0x4a, 3<<4, 2<<4); msleep(30); } +} - alc_update_coef_idx(codec, 0x4a, 3 << 10, 0); - alc_enable_headset_jack_key(codec); +static void alc222_init(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + hda_nid_t hp_pin = alc_get_hp_pin(spec); + bool hp1_pin_sense, hp2_pin_sense; + + if (!hp_pin) + return; + + msleep(30); + + hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin); + hp2_pin_sense = snd_hda_jack_detect(codec, 0x14); + + if (hp1_pin_sense || hp2_pin_sense) { + msleep(2); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT); + msleep(75); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); + + msleep(75); + } +} + +static void alc222_shutup(struct hda_codec *codec) +{ + struct alc_spec *spec = codec->spec; + hda_nid_t hp_pin = alc_get_hp_pin(spec); + bool hp1_pin_sense, hp2_pin_sense; + + if (!hp_pin) + hp_pin = 0x21; + + hp1_pin_sense = snd_hda_jack_detect(codec, hp_pin); + hp2_pin_sense = snd_hda_jack_detect(codec, 0x14); + + if (hp1_pin_sense || hp2_pin_sense) { + msleep(2); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + 
AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); + + msleep(75); + + if (hp1_pin_sense) + snd_hda_codec_write(codec, hp_pin, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); + if (hp2_pin_sense) + snd_hda_codec_write(codec, 0x14, 0, + AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); + + msleep(75); + } + alc_auto_setup_eapd(codec, false); + alc_shutup_pins(codec); } static void alc_default_init(struct hda_codec *codec) @@ -4717,6 +4783,21 @@ static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec, } } +static void alc295_fixup_hp_mute_led_coefbit11(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + spec->mute_led_polarity = 0; + spec->mute_led_coef.idx = 0xb; + spec->mute_led_coef.mask = 3 << 3; + spec->mute_led_coef.on = 1 << 3; + spec->mute_led_coef.off = 1 << 4; + snd_hda_gen_add_mute_led_cdev(codec, coef_mute_led_set); + } +} + static void alc285_fixup_hp_mute_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -4799,7 +4880,6 @@ static void alc298_fixup_samsung_amp(struct hda_codec *codec, } } -#if IS_REACHABLE(CONFIG_INPUT) static void gpio2_mic_hotkey_event(struct hda_codec *codec, struct hda_jack_callback *event) { @@ -4908,10 +4988,6 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec, spec->kb_dev = NULL; } } -#else /* INPUT */ -#define alc280_fixup_hp_gpio2_mic_hotkey NULL -#define alc233_fixup_lenovo_line2_mic_hotkey NULL -#endif /* INPUT */ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, const struct hda_fixup *fix, int action) @@ -4925,6 +5001,16 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, } } +static void alc233_fixup_lenovo_low_en_micmute_led(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) + spec->micmute_led_polarity = 1; + alc233_fixup_lenovo_line2_mic_hotkey(codec, fix, action); +} + static void alc_hp_mute_disable(struct hda_codec *codec, unsigned int delay) { if (delay <= 0) @@ -6701,6 +6787,41 @@ static void alc285_fixup_hp_spectre_x360_eb1(struct hda_codec *codec, } } +/* GPIO1 = amplifier on/off */ +static void alc285_fixup_hp_spectre_x360_df1(struct hda_codec *codec, + const struct hda_fixup *fix, + int action) +{ + struct alc_spec *spec = codec->spec; + static const hda_nid_t conn[] = { 0x02 }; + static const struct hda_pintbl pincfgs[] = { + { 0x14, 0x90170110 }, /* front/high speakers */ + { 0x17, 0x90170130 }, /* back/bass speakers */ + { } + }; + + // enable mute led + alc285_fixup_hp_mute_led_coefbit(codec, fix, action); + + switch (action) { + case HDA_FIXUP_ACT_PRE_PROBE: + /* needed for amp of back speakers */ + spec->gpio_mask |= 0x01; + spec->gpio_dir |= 0x01; + snd_hda_apply_pincfgs(codec, pincfgs); + /* share DAC to have unified volume control */ + snd_hda_override_conn_list(codec, 0x14, ARRAY_SIZE(conn), conn); + snd_hda_override_conn_list(codec, 0x17, ARRAY_SIZE(conn), conn); + break; + case HDA_FIXUP_ACT_INIT: + /* need to toggle GPIO to enable the amp of back speakers */ + alc_update_gpio_data(codec, 0x01, true); + msleep(100); + alc_update_gpio_data(codec, 0x01, false); + break; + } +} + static void alc285_fixup_hp_spectre_x360(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -6773,6 +6894,30 @@ static void alc285_fixup_hp_envy_x360(struct hda_codec *codec, } } +static void alc285_fixup_hp_beep(struct hda_codec *codec, + const struct 
hda_fixup *fix, int action) +{ + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + codec->beep_just_power_on = true; + } else if (action == HDA_FIXUP_ACT_INIT) { +#ifdef CONFIG_SND_HDA_INPUT_BEEP + /* + * Just enable loopback to internal speaker and headphone jack. + * Disable amplification to get about the same beep volume as + * was on pure BIOS setup before loading the driver. + */ + alc_update_coef_idx(codec, 0x36, 0x7070, BIT(13)); + + snd_hda_enable_beep_device(codec, 1); + +#if !IS_ENABLED(CONFIG_INPUT_PCSPKR) + dev_warn_once(hda_codec_dev(codec), + "enable CONFIG_INPUT_PCSPKR to get PC beeps\n"); +#endif +#endif + } +} + /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" @@ -7188,8 +7333,11 @@ enum { ALC290_FIXUP_MONO_SPEAKERS_HSJACK, ALC290_FIXUP_SUBWOOFER, ALC290_FIXUP_SUBWOOFER_HSJACK, + ALC295_FIXUP_HP_MUTE_LED_COEFBIT11, ALC269_FIXUP_THINKPAD_ACPI, ALC269_FIXUP_DMIC_THINKPAD_ACPI, + ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13, + ALC269VC_FIXUP_INFINIX_Y4_MAX, ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO, ALC255_FIXUP_ACER_MIC_NO_PRESENCE, ALC255_FIXUP_ASUS_MIC_NO_PRESENCE, @@ -7213,6 +7361,7 @@ enum { ALC280_FIXUP_HP_9480M, ALC245_FIXUP_HP_X360_AMP, ALC285_FIXUP_HP_SPECTRE_X360_EB1, + ALC285_FIXUP_HP_SPECTRE_X360_DF1, ALC285_FIXUP_HP_ENVY_X360, ALC288_FIXUP_DELL_HEADSET_MODE, ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, @@ -7228,6 +7377,7 @@ enum { ALC275_FIXUP_DELL_XPS, ALC293_FIXUP_LENOVO_SPK_NOISE, ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY, + ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED, ALC255_FIXUP_DELL_SPK_NOISE, ALC225_FIXUP_DISABLE_MIC_VREF, ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, @@ -7311,6 +7461,7 @@ enum { ALC285_FIXUP_HP_GPIO_LED, ALC285_FIXUP_HP_MUTE_LED, ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED, + ALC285_FIXUP_HP_BEEP_MICMUTE_LED, ALC236_FIXUP_HP_MUTE_LED_COEFBIT2, ALC236_FIXUP_HP_GPIO_LED, ALC236_FIXUP_HP_MUTE_LED, @@ -7542,6 +7693,25 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc269_fixup_pincfg_U7x7_headset_mic, }, + [ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x14, 0x90170151 }, /* use as internal speaker (LFE) */ + { 0x1b, 0x90170152 }, /* use as internal speaker (back) */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST + }, + [ALC269VC_FIXUP_INFINIX_Y4_MAX] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x1b, 0x90170150 }, /* use as internal speaker */ + { } + }, + .chained = true, + .chain_id = ALC269_FIXUP_LIMIT_INT_MIC_BOOST + }, [ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -8169,6 +8339,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc233_fixup_lenovo_line2_mic_hotkey, }, + [ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc233_fixup_lenovo_low_en_micmute_led, + }, [ALC233_FIXUP_INTEL_NUC8_DMIC] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_inv_dmic, @@ -8835,6 +9009,12 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_spectre_x360_mute_led, }, + [ALC285_FIXUP_HP_BEEP_MICMUTE_LED] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_beep, + .chained = true, + .chain_id = ALC285_FIXUP_HP_MUTE_LED, + }, [ALC236_FIXUP_HP_MUTE_LED_COEFBIT2] = { .type = HDA_FIXUP_FUNC, .v.func = alc236_fixup_hp_mute_led_coefbit2, @@ -8857,6 +9037,10 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC283_FIXUP_INT_MIC, 
}, + [ALC295_FIXUP_HP_MUTE_LED_COEFBIT11] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc295_fixup_hp_mute_led_coefbit11, + }, [ALC298_FIXUP_SAMSUNG_AMP] = { .type = HDA_FIXUP_FUNC, .v.func = alc298_fixup_samsung_amp, @@ -9174,6 +9358,10 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_spectre_x360_eb1 }, + [ALC285_FIXUP_HP_SPECTRE_X360_DF1] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc285_fixup_hp_spectre_x360_df1 + }, [ALC285_FIXUP_HP_ENVY_X360] = { .type = HDA_FIXUP_FUNC, .v.func = alc285_fixup_hp_envy_x360, @@ -9573,6 +9761,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x1360, "Acer Aspire A115", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x141f, "Acer Spin SP513-54N", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x142b, "Acer Swift SF314-42", ALC255_FIXUP_ACER_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), @@ -9725,6 +9914,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), SND_PCI_QUIRK(0x103c, 0x8519, "HP Spectre x360 15-df0xxx", ALC285_FIXUP_HP_SPECTRE_X360), SND_PCI_QUIRK(0x103c, 0x8537, "HP ProBook 440 G6", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x85c6, "HP Pavilion x360 Convertible 14-dy1xxx", ALC295_FIXUP_HP_MUTE_LED_COEFBIT11), SND_PCI_QUIRK(0x103c, 0x85de, "HP Envy x360 13-ar0xxx", ALC285_FIXUP_HP_ENVY_X360), SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT), @@ -9732,6 +9922,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO), SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), + SND_PCI_QUIRK(0x103c, 0x863e, "HP Spectre x360 15-df1xxx", ALC285_FIXUP_HP_SPECTRE_X360_DF1), SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), @@ -9742,7 +9933,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8735, "HP ProBook 435 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), - SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x8760, "HP EliteBook 8{4,5}5 G7", ALC285_FIXUP_HP_BEEP_MICMUTE_LED), SND_PCI_QUIRK(0x103c, 0x876e, "HP ENVY x360 Convertible 13-ay0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS), SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), @@ -9756,6 +9947,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", 
ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87d3, "HP Laptop 15-gw0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), + SND_PCI_QUIRK(0x103c, 0x87df, "HP ProBook 430 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x87f1, "HP ProBook 630 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), @@ -9771,6 +9963,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x881d, "HP 250 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), + SND_PCI_QUIRK(0x103c, 0x881e, "HP Laptop 15s-du3xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), @@ -9781,6 +9974,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8870, "HP ZBook Fury 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x8873, "HP ZBook Studio 15.6 Inch G8 Mobile Workstation PC", ALC285_FIXUP_HP_GPIO_AMP_INIT), SND_PCI_QUIRK(0x103c, 0x887a, "HP Laptop 15s-eq2xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), + SND_PCI_QUIRK(0x103c, 0x887c, "HP Laptop 14s-fq1xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x888a, "HP ENVY x360 Convertible 15-eu0xxx", ALC245_FIXUP_HP_X360_MUTE_LEDS), SND_PCI_QUIRK(0x103c, 0x888d, "HP ZBook Power 15.6 inch G8 Mobile Workstation PC", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8895, "HP EliteBook 855 G8 Notebook PC", ALC285_FIXUP_HP_SPEAKERS_MICMUTE_LED), @@ -9840,6 +10034,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8b59, "HP Elite mt645 G7 Mobile Thin Client U89", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8b5f, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), @@ -9886,14 +10081,24 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d01, "HP ZBook Power 14 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d84, "HP EliteBook X G1i", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d91, "HP ZBook Firefly 14 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8d92, "HP ZBook Firefly 16 G12", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8e18, "HP ZBook Firefly 14 
G12A", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8e19, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8e1a, "HP ZBook Firefly 14 G12A", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), + SND_PCI_QUIRK(0x1043, 0x1054, "ASUS G614FH/FM/FP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1074, "ASUS G614PH/PM/PP", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x10d3, "ASUS K6500ZC", ALC294_FIXUP_ASUS_SPK), SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x1043, 0x1194, "ASUS UM3406KA", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), @@ -9939,6 +10144,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1c80, "ASUS VivoBook TP401", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS), SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), @@ -9957,7 +10163,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2), + SND_PCI_QUIRK(0x1043, 0x1f63, "ASUS P5405CSA", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x1fb3, "ASUS ROG Flow Z13 GZ302EA", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2), SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2), @@ -10004,6 +10212,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP), + SND_PCI_QUIRK(0x144d, 0xca06, "Samsung Galaxy Book3 360 (NP730QFG)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), @@ -10147,6 +10356,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] 
= { SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340), SND_PCI_QUIRK(0x17aa, 0x334b, "Lenovo ThinkCentre M70 Gen5", ALC283_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x17aa, 0x3384, "ThinkCentre M90a PRO", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED), + SND_PCI_QUIRK(0x17aa, 0x3386, "ThinkCentre M90a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED), + SND_PCI_QUIRK(0x17aa, 0x3387, "ThinkCentre M70a Gen6", ALC233_FIXUP_LENOVO_L2MH_LOW_ENLED), SND_PCI_QUIRK(0x17aa, 0x3801, "Lenovo Yoga9 14IAP7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS), SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS), @@ -10167,6 +10379,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x3866, "Lenovo 13X", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), + SND_PCI_QUIRK(0x17aa, 0x390d, "Lenovo Yoga Pro 7 14ASP10", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN), SND_PCI_QUIRK(0x17aa, 0x3913, "Lenovo 145", ALC236_FIXUP_LENOVO_INV_DMIC), SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), @@ -10192,6 +10405,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), SND_PCI_QUIRK(0x17aa, 0x9e56, "Lenovo ZhaoYang CF4620Z", ALC286_FIXUP_SONY_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1849, 0x0269, "Positivo Master C6400", ALC269VB_FIXUP_ASUS_ZENBOOK), SND_PCI_QUIRK(0x1849, 0x1233, "ASRock NUC Box 1100", ALC233_FIXUP_NO_AUDIO_JACK), SND_PCI_QUIRK(0x1849, 0xa233, "Positivo Master C6300", ALC269_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS), @@ -10222,8 +10436,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), + SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2), + SND_PCI_QUIRK(0x2014, 0x800a, "Positivo ARN50", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13), SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO), + SND_PCI_QUIRK(0x2782, 0x1701, "Infinix Y4 Max", ALC269VC_FIXUP_INFINIX_Y4_MAX), + SND_PCI_QUIRK(0x2782, 0x1705, "MEDION E15433", ALC269VC_FIXUP_INFINIX_Y4_MAX), SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME), SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC), SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED), @@ -10232,6 +10451,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", 
ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0xf111, 0x000c, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE), #if 0 /* Below is a quirk table taken from the old code. @@ -10413,6 +10633,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC295_FIXUP_HP_OMEN, .name = "alc295-hp-omen"}, {.id = ALC285_FIXUP_HP_SPECTRE_X360, .name = "alc285-hp-spectre-x360"}, {.id = ALC285_FIXUP_HP_SPECTRE_X360_EB1, .name = "alc285-hp-spectre-x360-eb1"}, + {.id = ALC285_FIXUP_HP_SPECTRE_X360_DF1, .name = "alc285-hp-spectre-x360-df1"}, {.id = ALC285_FIXUP_HP_ENVY_X360, .name = "alc285-hp-envy-x360"}, {.id = ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP, .name = "alc287-ideapad-bass-spk-amp"}, {.id = ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN, .name = "alc287-yoga9-bass-spk-pin"}, @@ -10420,6 +10641,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"}, {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"}, {.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"}, + {.id = ALC2XX_FIXUP_HEADSET_MIC, .name = "alc2xx-fixup-headset-mic"}, {} }; #define ALC225_STANDARD_PINS \ @@ -10872,6 +11094,8 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = { {0x1a, 0x40000000}), SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC, {0x19, 0x40000000}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1558, "Clevo", ALC2XX_FIXUP_HEADSET_MIC, + {0x19, 0x40000000}), {} }; @@ -11061,8 +11285,11 @@ static int patch_alc269(struct hda_codec *codec) spec->codec_variant = ALC269_TYPE_ALC300; spec->gen.mixer_nid = 0; /* no loopback on ALC300 */ break; + case 0x10ec0222: case 0x10ec0623: spec->codec_variant = ALC269_TYPE_ALC623; + spec->shutup = alc222_shutup; + spec->init_hook = alc222_init; break; case 0x10ec0700: case 0x10ec0701: diff --git a/sound/sh/Kconfig b/sound/sh/Kconfig index b75fbb3236a7..f5fa09d740b4 100644 --- a/sound/sh/Kconfig +++ b/sound/sh/Kconfig @@ -14,7 +14,7 @@ if SND_SUPERH config SND_AICA tristate "Dreamcast Yamaha AICA sound" - depends on SH_DREAMCAST + depends on SH_DREAMCAST && SH_DMA_API select SND_PCM select G2_DMA help diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig index 44d4e6e51a35..6e358909bc5e 100644 --- a/sound/soc/amd/Kconfig +++ b/sound/soc/amd/Kconfig @@ -103,7 +103,7 @@ config SND_SOC_AMD_ACP6x config SND_SOC_AMD_YC_MACH tristate "AMD YC support for DMIC" select SND_SOC_DMIC - depends on SND_SOC_AMD_ACP6x + depends on SND_SOC_AMD_ACP6x && ACPI help This option enables machine driver for Yellow Carp platform using dmic. ACP IP has PDM Decoder block with DMA controller. 
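Note on the quirk-table additions above: each SND_PCI_QUIRK() entry keys a fixup on the codec's PCI subsystem vendor/device ID, and the alc269_fixup_models[] table maps a human-readable name onto the same fixup ID so it can be selected from userspace. The fragment below is only an illustrative sketch, not part of the patch; the 0x1234/0x5678 subsystem IDs and the table name are hypothetical, while SND_PCI_QUIRK() and ALC2XX_FIXUP_HEADSET_MIC are taken verbatim from the hunks above.

	/* Hypothetical entry: match subsystem 0x1234:0x5678 and apply the
	 * generic headset-mic fixup referenced elsewhere in this patch. */
	static const struct snd_pci_quirk example_fixup_tbl[] = {
		SND_PCI_QUIRK(0x1234, 0x5678, "Example Laptop", ALC2XX_FIXUP_HEADSET_MIC),
		{}
	};

Because the same fixup is exported through alc269_fixup_models as "alc2xx-fixup-headset-mic" (see the hunk above), it can usually be trialled on unlisted hardware via the snd-hda-intel model= option before a permanent quirk entry is submitted.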
diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index c18549759eab..1f94269e121a 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -220,6 +220,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "21J6"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21M1"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -227,6 +234,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "21M3"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21M4"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -234,6 +248,20 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "21M5"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21M6"), + } + }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21ME"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -276,6 +304,34 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "83AS"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83L3"), + } + }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83N6"), + } + }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"), + } + }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -423,6 +479,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "pang13"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."), + DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7UCX"), + } + }, {} }; @@ -432,8 +495,14 @@ static int acp6x_probe(struct platform_device *pdev) struct acp6x_pdm *machine = NULL; struct snd_soc_card *card; struct acpi_device *adev; + acpi_handle handle; + acpi_integer dmic_status; int ret; + bool is_dmic_enable, wov_en; + /* IF WOV entry not found, enable dmic based on AcpDmicConnected entry*/ + is_dmic_enable = false; + wov_en = true; /* check the parent device's firmware node has _DSD or not */ adev = ACPI_COMPANION(pdev->dev.parent); if (adev) { @@ -441,9 +510,24 @@ static int acp6x_probe(struct platform_device *pdev) if (!acpi_dev_get_property(adev, "AcpDmicConnected", ACPI_TYPE_INTEGER, &obj) && obj->integer.value == 1) - platform_set_drvdata(pdev, &acp6x_card); + is_dmic_enable = true; } + handle = ACPI_HANDLE(pdev->dev.parent); + ret = acpi_evaluate_integer(handle, "_WOV", NULL, &dmic_status); + if (!ACPI_FAILURE(ret)) { + wov_en = dmic_status; + if (!wov_en) + return -ENODEV; + } else { + /* Incase of ACPI method read failure then jump to check_dmi_entry */ + goto check_dmi_entry; + } + + if (is_dmic_enable) + platform_set_drvdata(pdev, &acp6x_card); + +check_dmi_entry: /* check for any DMI overrides */ dmi_id = dmi_first_match(yc_acp_quirk_table); if (dmi_id) diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 
6f6acbdedab6..958f38c87e6c 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -2093,6 +2093,7 @@ config SND_SOC_WM8993 config SND_SOC_WM8994 tristate + depends on MFD_WM8994 config SND_SOC_WM8995 tristate diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c index f75c19ef3551..3f790d1f11a9 100644 --- a/sound/soc/codecs/ak4613.c +++ b/sound/soc/codecs/ak4613.c @@ -840,14 +840,14 @@ static void ak4613_parse_of(struct ak4613_priv *priv, /* Input 1 - 2 */ for (i = 0; i < 2; i++) { snprintf(prop, sizeof(prop), "asahi-kasei,in%d-single-end", i + 1); - if (!of_get_property(np, prop, NULL)) + if (!of_property_read_bool(np, prop)) priv->ic |= 1 << i; } /* Output 1 - 6 */ for (i = 0; i < 6; i++) { snprintf(prop, sizeof(prop), "asahi-kasei,out%d-single-end", i + 1); - if (!of_get_property(np, prop, NULL)) + if (!of_property_read_bool(np, prop)) priv->oc |= 1 << i; } diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c index 7434aeeda292..7a74941c608f 100644 --- a/sound/soc/codecs/arizona.c +++ b/sound/soc/codecs/arizona.c @@ -967,7 +967,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, case ARIZONA_OUT3L_ENA_SHIFT: case ARIZONA_OUT3R_ENA_SHIFT: priv->out_up_pending++; - priv->out_up_delay += 17; + priv->out_up_delay += 17000; break; case ARIZONA_OUT4L_ENA_SHIFT: case ARIZONA_OUT4R_ENA_SHIFT: @@ -977,7 +977,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, case WM8997: break; default: - priv->out_up_delay += 10; + priv->out_up_delay += 10000; break; } break; @@ -999,7 +999,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, if (!priv->out_up_pending && priv->out_up_delay) { dev_dbg(component->dev, "Power up delay: %d\n", priv->out_up_delay); - msleep(priv->out_up_delay); + fsleep(priv->out_up_delay); priv->out_up_delay = 0; } break; @@ -1017,7 +1017,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, case ARIZONA_OUT3L_ENA_SHIFT: case ARIZONA_OUT3R_ENA_SHIFT: priv->out_down_pending++; - priv->out_down_delay++; + priv->out_down_delay += 1000; break; case ARIZONA_OUT4L_ENA_SHIFT: case ARIZONA_OUT4R_ENA_SHIFT: @@ -1028,10 +1028,10 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, break; case WM8998: case WM1814: - priv->out_down_delay += 5; + priv->out_down_delay += 5000; break; default: - priv->out_down_delay++; + priv->out_down_delay += 1000; break; } break; @@ -1053,7 +1053,7 @@ int arizona_out_ev(struct snd_soc_dapm_widget *w, if (!priv->out_down_pending && priv->out_down_delay) { dev_dbg(component->dev, "Power down delay: %d\n", priv->out_down_delay); - msleep(priv->out_down_delay); + fsleep(priv->out_down_delay); priv->out_down_delay = 0; } break; diff --git a/sound/soc/codecs/cs35l41-spi.c b/sound/soc/codecs/cs35l41-spi.c index 5c8bb24909eb..bd73944758c6 100644 --- a/sound/soc/codecs/cs35l41-spi.c +++ b/sound/soc/codecs/cs35l41-spi.c @@ -39,7 +39,9 @@ static int cs35l41_spi_probe(struct spi_device *spi) return -ENOMEM; spi->max_speed_hz = CS35L41_SPI_MAX_FREQ; - spi_setup(spi); + ret = spi_setup(spi); + if (ret < 0) + return ret; spi_set_drvdata(spi, cs35l41); cs35l41->regmap = devm_regmap_init_spi(spi, regmap_config); diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c index 4746c8700451..e100ee4921e3 100644 --- a/sound/soc/codecs/da7219.c +++ b/sound/soc/codecs/da7219.c @@ -1167,17 +1167,20 @@ static int da7219_set_dai_sysclk(struct snd_soc_dai *codec_dai, struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component); int ret = 0; - if ((da7219->clk_src == clk_id) && (da7219->mclk_rate == freq)) + 
mutex_lock(&da7219->pll_lock); + + if ((da7219->clk_src == clk_id) && (da7219->mclk_rate == freq)) { + mutex_unlock(&da7219->pll_lock); return 0; + } if ((freq < 2000000) || (freq > 54000000)) { + mutex_unlock(&da7219->pll_lock); dev_err(codec_dai->dev, "Unsupported MCLK value %d\n", freq); return -EINVAL; } - mutex_lock(&da7219->pll_lock); - switch (clk_id) { case DA7219_CLKSRC_MCLK_SQR: snd_soc_component_update_bits(component, DA7219_PLL_CTRL, diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c index 7bb1c536b917..3d225913c004 100644 --- a/sound/soc/codecs/es8328.c +++ b/sound/soc/codecs/es8328.c @@ -244,7 +244,6 @@ static const struct snd_kcontrol_new es8328_right_line_controls = /* Left Mixer */ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = { - SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL17, 7, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL17, 6, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", ES8328_DACCONTROL18, 7, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL18, 6, 1, 0), @@ -254,7 +253,6 @@ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = { static const struct snd_kcontrol_new es8328_right_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", ES8328_DACCONTROL19, 7, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", ES8328_DACCONTROL19, 6, 1, 0), - SOC_DAPM_SINGLE("Playback Switch", ES8328_DACCONTROL20, 7, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", ES8328_DACCONTROL20, 6, 1, 0), }; @@ -347,10 +345,10 @@ static const struct snd_soc_dapm_widget es8328_dapm_widgets[] = { SND_SOC_DAPM_DAC("Left DAC", "Left Playback", ES8328_DACPOWER, ES8328_DACPOWER_LDAC_OFF, 1), - SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, + SND_SOC_DAPM_MIXER("Left Mixer", ES8328_DACCONTROL17, 7, 0, &es8328_left_mixer_controls[0], ARRAY_SIZE(es8328_left_mixer_controls)), - SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, + SND_SOC_DAPM_MIXER("Right Mixer", ES8328_DACCONTROL20, 7, 0, &es8328_right_mixer_controls[0], ARRAY_SIZE(es8328_right_mixer_controls)), @@ -429,19 +427,14 @@ static const struct snd_soc_dapm_route es8328_dapm_routes[] = { { "Right Line Mux", "PGA", "Right PGA Mux" }, { "Right Line Mux", "Differential", "Differential Mux" }, - { "Left Out 1", NULL, "Left DAC" }, - { "Right Out 1", NULL, "Right DAC" }, - { "Left Out 2", NULL, "Left DAC" }, - { "Right Out 2", NULL, "Right DAC" }, - - { "Left Mixer", "Playback Switch", "Left DAC" }, + { "Left Mixer", NULL, "Left DAC" }, { "Left Mixer", "Left Bypass Switch", "Left Line Mux" }, { "Left Mixer", "Right Playback Switch", "Right DAC" }, { "Left Mixer", "Right Bypass Switch", "Right Line Mux" }, { "Right Mixer", "Left Playback Switch", "Left DAC" }, { "Right Mixer", "Left Bypass Switch", "Left Line Mux" }, - { "Right Mixer", "Playback Switch", "Right DAC" }, + { "Right Mixer", NULL, "Right DAC" }, { "Right Mixer", "Right Bypass Switch", "Right Line Mux" }, { "DAC DIG", NULL, "DAC STM" }, diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index 2849ea39efc8..6a51f83e246d 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c @@ -184,84 +184,97 @@ static const struct snd_pcm_chmap_elem hdmi_codec_8ch_chmaps[] = { /* * hdmi_codec_channel_alloc: speaker configuration available for CEA * - * This is an ordered list that must match with hdmi_codec_8ch_chmaps struct + * This is an ordered list where ca_id must exist in hdmi_codec_8ch_chmaps * The preceding ones have better chances to be selected by * 
hdmi_codec_get_ch_alloc_table_idx(). */ static const struct hdmi_codec_cea_spk_alloc hdmi_codec_channel_alloc[] = { { .ca_id = 0x00, .n_ch = 2, - .mask = FL | FR}, - /* 2.1 */ - { .ca_id = 0x01, .n_ch = 4, - .mask = FL | FR | LFE}, - /* Dolby Surround */ + .mask = FL | FR }, + { .ca_id = 0x03, .n_ch = 4, + .mask = FL | FR | LFE | FC }, { .ca_id = 0x02, .n_ch = 4, .mask = FL | FR | FC }, - /* surround51 */ + { .ca_id = 0x01, .n_ch = 4, + .mask = FL | FR | LFE }, { .ca_id = 0x0b, .n_ch = 6, - .mask = FL | FR | LFE | FC | RL | RR}, - /* surround40 */ - { .ca_id = 0x08, .n_ch = 6, - .mask = FL | FR | RL | RR }, - /* surround41 */ - { .ca_id = 0x09, .n_ch = 6, - .mask = FL | FR | LFE | RL | RR }, - /* surround50 */ + .mask = FL | FR | LFE | FC | RL | RR }, { .ca_id = 0x0a, .n_ch = 6, .mask = FL | FR | FC | RL | RR }, - /* 6.1 */ - { .ca_id = 0x0f, .n_ch = 8, - .mask = FL | FR | LFE | FC | RL | RR | RC }, - /* surround71 */ + { .ca_id = 0x09, .n_ch = 6, + .mask = FL | FR | LFE | RL | RR }, + { .ca_id = 0x08, .n_ch = 6, + .mask = FL | FR | RL | RR }, + { .ca_id = 0x07, .n_ch = 6, + .mask = FL | FR | LFE | FC | RC }, + { .ca_id = 0x06, .n_ch = 6, + .mask = FL | FR | FC | RC }, + { .ca_id = 0x05, .n_ch = 6, + .mask = FL | FR | LFE | RC }, + { .ca_id = 0x04, .n_ch = 6, + .mask = FL | FR | RC }, { .ca_id = 0x13, .n_ch = 8, .mask = FL | FR | LFE | FC | RL | RR | RLC | RRC }, - /* others */ - { .ca_id = 0x03, .n_ch = 8, - .mask = FL | FR | LFE | FC }, - { .ca_id = 0x04, .n_ch = 8, - .mask = FL | FR | RC}, - { .ca_id = 0x05, .n_ch = 8, - .mask = FL | FR | LFE | RC }, - { .ca_id = 0x06, .n_ch = 8, - .mask = FL | FR | FC | RC }, - { .ca_id = 0x07, .n_ch = 8, - .mask = FL | FR | LFE | FC | RC }, - { .ca_id = 0x0c, .n_ch = 8, - .mask = FL | FR | RC | RL | RR }, - { .ca_id = 0x0d, .n_ch = 8, - .mask = FL | FR | LFE | RL | RR | RC }, - { .ca_id = 0x0e, .n_ch = 8, - .mask = FL | FR | FC | RL | RR | RC }, - { .ca_id = 0x10, .n_ch = 8, - .mask = FL | FR | RL | RR | RLC | RRC }, - { .ca_id = 0x11, .n_ch = 8, - .mask = FL | FR | LFE | RL | RR | RLC | RRC }, - { .ca_id = 0x12, .n_ch = 8, - .mask = FL | FR | FC | RL | RR | RLC | RRC }, - { .ca_id = 0x14, .n_ch = 8, - .mask = FL | FR | FLC | FRC }, - { .ca_id = 0x15, .n_ch = 8, - .mask = FL | FR | LFE | FLC | FRC }, - { .ca_id = 0x16, .n_ch = 8, - .mask = FL | FR | FC | FLC | FRC }, - { .ca_id = 0x17, .n_ch = 8, - .mask = FL | FR | LFE | FC | FLC | FRC }, - { .ca_id = 0x18, .n_ch = 8, - .mask = FL | FR | RC | FLC | FRC }, - { .ca_id = 0x19, .n_ch = 8, - .mask = FL | FR | LFE | RC | FLC | FRC }, - { .ca_id = 0x1a, .n_ch = 8, - .mask = FL | FR | RC | FC | FLC | FRC }, - { .ca_id = 0x1b, .n_ch = 8, - .mask = FL | FR | LFE | RC | FC | FLC | FRC }, - { .ca_id = 0x1c, .n_ch = 8, - .mask = FL | FR | RL | RR | FLC | FRC }, - { .ca_id = 0x1d, .n_ch = 8, - .mask = FL | FR | LFE | RL | RR | FLC | FRC }, - { .ca_id = 0x1e, .n_ch = 8, - .mask = FL | FR | FC | RL | RR | FLC | FRC }, { .ca_id = 0x1f, .n_ch = 8, .mask = FL | FR | LFE | FC | RL | RR | FLC | FRC }, + { .ca_id = 0x12, .n_ch = 8, + .mask = FL | FR | FC | RL | RR | RLC | RRC }, + { .ca_id = 0x1e, .n_ch = 8, + .mask = FL | FR | FC | RL | RR | FLC | FRC }, + { .ca_id = 0x11, .n_ch = 8, + .mask = FL | FR | LFE | RL | RR | RLC | RRC }, + { .ca_id = 0x1d, .n_ch = 8, + .mask = FL | FR | LFE | RL | RR | FLC | FRC }, + { .ca_id = 0x10, .n_ch = 8, + .mask = FL | FR | RL | RR | RLC | RRC }, + { .ca_id = 0x1c, .n_ch = 8, + .mask = FL | FR | RL | RR | FLC | FRC }, + { .ca_id = 0x0f, .n_ch = 8, + .mask = FL | FR | LFE | FC | RL | RR 
| RC }, + { .ca_id = 0x1b, .n_ch = 8, + .mask = FL | FR | LFE | RC | FC | FLC | FRC }, + { .ca_id = 0x0e, .n_ch = 8, + .mask = FL | FR | FC | RL | RR | RC }, + { .ca_id = 0x1a, .n_ch = 8, + .mask = FL | FR | RC | FC | FLC | FRC }, + { .ca_id = 0x0d, .n_ch = 8, + .mask = FL | FR | LFE | RL | RR | RC }, + { .ca_id = 0x19, .n_ch = 8, + .mask = FL | FR | LFE | RC | FLC | FRC }, + { .ca_id = 0x0c, .n_ch = 8, + .mask = FL | FR | RC | RL | RR }, + { .ca_id = 0x18, .n_ch = 8, + .mask = FL | FR | RC | FLC | FRC }, + { .ca_id = 0x17, .n_ch = 8, + .mask = FL | FR | LFE | FC | FLC | FRC }, + { .ca_id = 0x16, .n_ch = 8, + .mask = FL | FR | FC | FLC | FRC }, + { .ca_id = 0x15, .n_ch = 8, + .mask = FL | FR | LFE | FLC | FRC }, + { .ca_id = 0x14, .n_ch = 8, + .mask = FL | FR | FLC | FRC }, + { .ca_id = 0x0b, .n_ch = 8, + .mask = FL | FR | LFE | FC | RL | RR }, + { .ca_id = 0x0a, .n_ch = 8, + .mask = FL | FR | FC | RL | RR }, + { .ca_id = 0x09, .n_ch = 8, + .mask = FL | FR | LFE | RL | RR }, + { .ca_id = 0x08, .n_ch = 8, + .mask = FL | FR | RL | RR }, + { .ca_id = 0x07, .n_ch = 8, + .mask = FL | FR | LFE | FC | RC }, + { .ca_id = 0x06, .n_ch = 8, + .mask = FL | FR | FC | RC }, + { .ca_id = 0x05, .n_ch = 8, + .mask = FL | FR | LFE | RC }, + { .ca_id = 0x04, .n_ch = 8, + .mask = FL | FR | RC }, + { .ca_id = 0x03, .n_ch = 8, + .mask = FL | FR | LFE | FC }, + { .ca_id = 0x02, .n_ch = 8, + .mask = FL | FR | FC }, + { .ca_id = 0x01, .n_ch = 8, + .mask = FL | FR | LFE }, }; struct hdmi_codec_priv { @@ -376,7 +389,8 @@ static int hdmi_codec_chmap_ctl_get(struct snd_kcontrol *kcontrol, struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); struct hdmi_codec_priv *hcp = info->private_data; - map = info->chmap[hcp->chmap_idx].map; + if (hcp->chmap_idx != HDMI_CODEC_CHMAP_IDX_UNKNOWN) + map = info->chmap[hcp->chmap_idx].map; for (i = 0; i < info->max_channels; i++) { if (hcp->chmap_idx == HDMI_CODEC_CHMAP_IDX_UNKNOWN) diff --git a/sound/soc/codecs/lpass-wsa-macro.c b/sound/soc/codecs/lpass-wsa-macro.c index bb9de5767ebc..e217c56a7e87 100644 --- a/sound/soc/codecs/lpass-wsa-macro.c +++ b/sound/soc/codecs/lpass-wsa-macro.c @@ -63,6 +63,10 @@ #define CDC_WSA_TX_SPKR_PROT_CLK_DISABLE 0 #define CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK GENMASK(3, 0) #define CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K 0 +#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_16K 1 +#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_24K 2 +#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_32K 3 +#define CDC_WSA_TX_SPKR_PROT_PCM_RATE_48K 4 #define CDC_WSA_TX0_SPKR_PROT_PATH_CFG0 (0x0248) #define CDC_WSA_TX1_SPKR_PROT_PATH_CTL (0x0264) #define CDC_WSA_TX1_SPKR_PROT_PATH_CFG0 (0x0268) @@ -344,6 +348,7 @@ struct wsa_macro { int ear_spkr_gain; int spkr_gain_offset; int spkr_mode; + u32 pcm_rate_vi; int is_softclip_on[WSA_MACRO_SOFTCLIP_MAX]; int softclip_clk_users[WSA_MACRO_SOFTCLIP_MAX]; struct regmap *regmap; @@ -971,6 +976,7 @@ static int wsa_macro_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_component *component = dai->component; + struct wsa_macro *wsa = snd_soc_component_get_drvdata(component); int ret; switch (substream->stream) { @@ -982,6 +988,11 @@ static int wsa_macro_hw_params(struct snd_pcm_substream *substream, __func__, params_rate(params)); return ret; } + break; + case SNDRV_PCM_STREAM_CAPTURE: + if (dai->id == WSA_MACRO_AIF_VI) + wsa->pcm_rate_vi = params_rate(params); + break; default: break; @@ -1139,6 +1150,67 @@ static void wsa_macro_mclk_enable(struct wsa_macro *wsa, bool mclk_enable) } } +static void 
wsa_macro_enable_disable_vi_sense(struct snd_soc_component *component, bool enable, + u32 tx_reg0, u32 tx_reg1, u32 val) +{ + if (enable) { + /* Enable V&I sensing */ + snd_soc_component_update_bits(component, tx_reg0, + CDC_WSA_TX_SPKR_PROT_RESET_MASK, + CDC_WSA_TX_SPKR_PROT_RESET); + snd_soc_component_update_bits(component, tx_reg1, + CDC_WSA_TX_SPKR_PROT_RESET_MASK, + CDC_WSA_TX_SPKR_PROT_RESET); + snd_soc_component_update_bits(component, tx_reg0, + CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK, + val); + snd_soc_component_update_bits(component, tx_reg1, + CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK, + val); + snd_soc_component_update_bits(component, tx_reg0, + CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK, + CDC_WSA_TX_SPKR_PROT_CLK_ENABLE); + snd_soc_component_update_bits(component, tx_reg1, + CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK, + CDC_WSA_TX_SPKR_PROT_CLK_ENABLE); + snd_soc_component_update_bits(component, tx_reg0, + CDC_WSA_TX_SPKR_PROT_RESET_MASK, + CDC_WSA_TX_SPKR_PROT_NO_RESET); + snd_soc_component_update_bits(component, tx_reg1, + CDC_WSA_TX_SPKR_PROT_RESET_MASK, + CDC_WSA_TX_SPKR_PROT_NO_RESET); + } else { + snd_soc_component_update_bits(component, tx_reg0, + CDC_WSA_TX_SPKR_PROT_RESET_MASK, + CDC_WSA_TX_SPKR_PROT_RESET); + snd_soc_component_update_bits(component, tx_reg1, + CDC_WSA_TX_SPKR_PROT_RESET_MASK, + CDC_WSA_TX_SPKR_PROT_RESET); + snd_soc_component_update_bits(component, tx_reg0, + CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK, + CDC_WSA_TX_SPKR_PROT_CLK_DISABLE); + snd_soc_component_update_bits(component, tx_reg1, + CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK, + CDC_WSA_TX_SPKR_PROT_CLK_DISABLE); + } +} + +static void wsa_macro_enable_disable_vi_feedback(struct snd_soc_component *component, + bool enable, u32 rate) +{ + struct wsa_macro *wsa = snd_soc_component_get_drvdata(component); + + if (test_bit(WSA_MACRO_TX0, &wsa->active_ch_mask[WSA_MACRO_AIF_VI])) + wsa_macro_enable_disable_vi_sense(component, enable, + CDC_WSA_TX0_SPKR_PROT_PATH_CTL, + CDC_WSA_TX1_SPKR_PROT_PATH_CTL, rate); + + if (test_bit(WSA_MACRO_TX1, &wsa->active_ch_mask[WSA_MACRO_AIF_VI])) + wsa_macro_enable_disable_vi_sense(component, enable, + CDC_WSA_TX2_SPKR_PROT_PATH_CTL, + CDC_WSA_TX3_SPKR_PROT_PATH_CTL, rate); +} + static int wsa_macro_mclk_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -1155,58 +1227,37 @@ static int wsa_macro_enable_vi_feedback(struct snd_soc_dapm_widget *w, { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct wsa_macro *wsa = snd_soc_component_get_drvdata(component); - u32 tx_reg0, tx_reg1; + u32 rate_val; - if (test_bit(WSA_MACRO_TX0, &wsa->active_ch_mask[WSA_MACRO_AIF_VI])) { - tx_reg0 = CDC_WSA_TX0_SPKR_PROT_PATH_CTL; - tx_reg1 = CDC_WSA_TX1_SPKR_PROT_PATH_CTL; - } else if (test_bit(WSA_MACRO_TX1, &wsa->active_ch_mask[WSA_MACRO_AIF_VI])) { - tx_reg0 = CDC_WSA_TX2_SPKR_PROT_PATH_CTL; - tx_reg1 = CDC_WSA_TX3_SPKR_PROT_PATH_CTL; + switch (wsa->pcm_rate_vi) { + case 8000: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K; + break; + case 16000: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_16K; + break; + case 24000: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_24K; + break; + case 32000: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_32K; + break; + case 48000: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_48K; + break; + default: + rate_val = CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K; + break; } switch (event) { case SND_SOC_DAPM_POST_PMU: - /* Enable V&I sensing */ - snd_soc_component_update_bits(component, tx_reg0, - CDC_WSA_TX_SPKR_PROT_RESET_MASK, - CDC_WSA_TX_SPKR_PROT_RESET); - 
snd_soc_component_update_bits(component, tx_reg1, - CDC_WSA_TX_SPKR_PROT_RESET_MASK, - CDC_WSA_TX_SPKR_PROT_RESET); - snd_soc_component_update_bits(component, tx_reg0, - CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK, - CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K); - snd_soc_component_update_bits(component, tx_reg1, - CDC_WSA_TX_SPKR_PROT_PCM_RATE_MASK, - CDC_WSA_TX_SPKR_PROT_PCM_RATE_8K); - snd_soc_component_update_bits(component, tx_reg0, - CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK, - CDC_WSA_TX_SPKR_PROT_CLK_ENABLE); - snd_soc_component_update_bits(component, tx_reg1, - CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK, - CDC_WSA_TX_SPKR_PROT_CLK_ENABLE); - snd_soc_component_update_bits(component, tx_reg0, - CDC_WSA_TX_SPKR_PROT_RESET_MASK, - CDC_WSA_TX_SPKR_PROT_NO_RESET); - snd_soc_component_update_bits(component, tx_reg1, - CDC_WSA_TX_SPKR_PROT_RESET_MASK, - CDC_WSA_TX_SPKR_PROT_NO_RESET); + /* Enable V&I sensing */ + wsa_macro_enable_disable_vi_feedback(component, true, rate_val); break; case SND_SOC_DAPM_POST_PMD: /* Disable V&I sensing */ - snd_soc_component_update_bits(component, tx_reg0, - CDC_WSA_TX_SPKR_PROT_RESET_MASK, - CDC_WSA_TX_SPKR_PROT_RESET); - snd_soc_component_update_bits(component, tx_reg1, - CDC_WSA_TX_SPKR_PROT_RESET_MASK, - CDC_WSA_TX_SPKR_PROT_RESET); - snd_soc_component_update_bits(component, tx_reg0, - CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK, - CDC_WSA_TX_SPKR_PROT_CLK_DISABLE); - snd_soc_component_update_bits(component, tx_reg1, - CDC_WSA_TX_SPKR_PROT_CLK_EN_MASK, - CDC_WSA_TX_SPKR_PROT_CLK_DISABLE); + wsa_macro_enable_disable_vi_feedback(component, false, rate_val); break; } diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c index b9f19fbd2911..30e680ee1069 100644 --- a/sound/soc/codecs/madera.c +++ b/sound/soc/codecs/madera.c @@ -2322,10 +2322,10 @@ int madera_out_ev(struct snd_soc_dapm_widget *w, case CS42L92: case CS47L92: case CS47L93: - out_up_delay = 6; + out_up_delay = 6000; break; default: - out_up_delay = 17; + out_up_delay = 17000; break; } @@ -2356,7 +2356,7 @@ int madera_out_ev(struct snd_soc_dapm_widget *w, case MADERA_OUT3R_ENA_SHIFT: priv->out_up_pending--; if (!priv->out_up_pending) { - msleep(priv->out_up_delay); + fsleep(priv->out_up_delay); priv->out_up_delay = 0; } break; @@ -2375,7 +2375,7 @@ int madera_out_ev(struct snd_soc_dapm_widget *w, case MADERA_OUT3L_ENA_SHIFT: case MADERA_OUT3R_ENA_SHIFT: priv->out_down_pending++; - priv->out_down_delay++; + priv->out_down_delay += 1000; break; default: break; @@ -2392,7 +2392,7 @@ int madera_out_ev(struct snd_soc_dapm_widget *w, case MADERA_OUT3R_ENA_SHIFT: priv->out_down_pending--; if (!priv->out_down_pending) { - msleep(priv->out_down_delay); + fsleep(priv->out_down_delay); priv->out_down_delay = 0; } break; diff --git a/sound/soc/codecs/mt6359-accdet.h b/sound/soc/codecs/mt6359-accdet.h index c234f2f4276a..78ada3a5bfae 100644 --- a/sound/soc/codecs/mt6359-accdet.h +++ b/sound/soc/codecs/mt6359-accdet.h @@ -123,6 +123,15 @@ struct mt6359_accdet { struct workqueue_struct *jd_workqueue; }; +#if IS_ENABLED(CONFIG_SND_SOC_MT6359_ACCDET) int mt6359_accdet_enable_jack_detect(struct snd_soc_component *component, struct snd_soc_jack *jack); +#else +static inline int +mt6359_accdet_enable_jack_detect(struct snd_soc_component *component, + struct snd_soc_jack *jack) +{ + return -EOPNOTSUPP; +} +#endif #endif diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c index 9d6431338fb7..329549936bd5 100644 --- a/sound/soc/codecs/pcm3168a.c +++ b/sound/soc/codecs/pcm3168a.c @@ -494,9 +494,9 @@ static int pcm3168a_hw_params(struct 
snd_pcm_substream *substream, } break; case 24: - if (provider_mode || (format == SND_SOC_DAIFMT_DSP_A) || - (format == SND_SOC_DAIFMT_DSP_B)) { - dev_err(component->dev, "24-bit slots not supported in provider mode, or consumer mode using DSP\n"); + if (!provider_mode && ((format == SND_SOC_DAIFMT_DSP_A) || + (format == SND_SOC_DAIFMT_DSP_B))) { + dev_err(component->dev, "24-bit slots not supported in consumer mode using DSP\n"); return -EINVAL; } break; diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c index 2e0ed3e68fa5..10f0f07b90ff 100644 --- a/sound/soc/codecs/tas2764.c +++ b/sound/soc/codecs/tas2764.c @@ -182,33 +182,6 @@ static SOC_ENUM_SINGLE_DECL( static const struct snd_kcontrol_new tas2764_asi1_mux = SOC_DAPM_ENUM("ASI1 Source", tas2764_ASI1_src_enum); -static int tas2764_dac_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); - struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); - int ret; - - switch (event) { - case SND_SOC_DAPM_POST_PMU: - tas2764->dac_powered = true; - ret = tas2764_update_pwr_ctrl(tas2764); - break; - case SND_SOC_DAPM_PRE_PMD: - tas2764->dac_powered = false; - ret = tas2764_update_pwr_ctrl(tas2764); - break; - default: - dev_err(tas2764->dev, "Unsupported event\n"); - return -EINVAL; - } - - if (ret < 0) - return ret; - - return 0; -} - static const struct snd_kcontrol_new isense_switch = SOC_DAPM_SINGLE("Switch", TAS2764_PWR_CTRL, TAS2764_ISENSE_POWER_EN, 1, 1); static const struct snd_kcontrol_new vsense_switch = @@ -221,8 +194,7 @@ static const struct snd_soc_dapm_widget tas2764_dapm_widgets[] = { 1, &isense_switch), SND_SOC_DAPM_SWITCH("VSENSE", TAS2764_PWR_CTRL, TAS2764_VSENSE_POWER_EN, 1, &vsense_switch), - SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas2764_dac_event, - SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), + SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_OUTPUT("OUT"), SND_SOC_DAPM_SIGGEN("VMON"), SND_SOC_DAPM_SIGGEN("IMON") @@ -243,9 +215,28 @@ static int tas2764_mute(struct snd_soc_dai *dai, int mute, int direction) { struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(dai->component); + int ret; + + if (!mute) { + tas2764->dac_powered = true; + ret = tas2764_update_pwr_ctrl(tas2764); + if (ret) + return ret; + } tas2764->unmuted = !mute; - return tas2764_update_pwr_ctrl(tas2764); + ret = tas2764_update_pwr_ctrl(tas2764); + if (ret) + return ret; + + if (mute) { + tas2764->dac_powered = false; + ret = tas2764_update_pwr_ctrl(tas2764); + if (ret) + return ret; + } + + return 0; } static int tas2764_set_bitwidth(struct tas2764_priv *tas2764, int bitwidth) @@ -367,7 +358,7 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_component *component = dai->component; struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); - u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0; + u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0, asi_cfg_4 = 0; int ret; switch (fmt & SND_SOC_DAIFMT_INV_MASK) { @@ -376,12 +367,14 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) fallthrough; case SND_SOC_DAIFMT_NB_NF: asi_cfg_1 = TAS2764_TDM_CFG1_RX_RISING; + asi_cfg_4 = TAS2764_TDM_CFG4_TX_FALLING; break; case SND_SOC_DAIFMT_IB_IF: asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; fallthrough; case SND_SOC_DAIFMT_IB_NF: asi_cfg_1 = TAS2764_TDM_CFG1_RX_FALLING; + asi_cfg_4 = TAS2764_TDM_CFG4_TX_RISING; 
break; } @@ -391,6 +384,12 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) if (ret < 0) return ret; + ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG4, + TAS2764_TDM_CFG4_TX_MASK, + asi_cfg_4); + if (ret < 0) + return ret; + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; @@ -628,6 +627,7 @@ static const struct reg_default tas2764_reg_defaults[] = { { TAS2764_TDM_CFG2, 0x0a }, { TAS2764_TDM_CFG3, 0x10 }, { TAS2764_TDM_CFG5, 0x42 }, + { TAS2764_INT_CLK_CFG, 0x19 }, }; static const struct regmap_range_cfg tas2764_regmap_ranges[] = { @@ -645,6 +645,7 @@ static const struct regmap_range_cfg tas2764_regmap_ranges[] = { static bool tas2764_volatile_register(struct device *dev, unsigned int reg) { switch (reg) { + case TAS2764_SW_RST: case TAS2764_INT_LTCH0 ... TAS2764_INT_LTCH4: case TAS2764_INT_CLK_CFG: return true; diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h index 168af772a898..9490f2686e38 100644 --- a/sound/soc/codecs/tas2764.h +++ b/sound/soc/codecs/tas2764.h @@ -25,7 +25,7 @@ /* Power Control */ #define TAS2764_PWR_CTRL TAS2764_REG(0X0, 0x02) -#define TAS2764_PWR_CTRL_MASK GENMASK(1, 0) +#define TAS2764_PWR_CTRL_MASK GENMASK(2, 0) #define TAS2764_PWR_CTRL_ACTIVE 0x0 #define TAS2764_PWR_CTRL_MUTE BIT(0) #define TAS2764_PWR_CTRL_SHUTDOWN BIT(1) @@ -79,6 +79,12 @@ #define TAS2764_TDM_CFG3_RXS_SHIFT 0x4 #define TAS2764_TDM_CFG3_MASK GENMASK(3, 0) +/* TDM Configuration Reg4 */ +#define TAS2764_TDM_CFG4 TAS2764_REG(0X0, 0x0d) +#define TAS2764_TDM_CFG4_TX_MASK BIT(0) +#define TAS2764_TDM_CFG4_TX_RISING 0x0 +#define TAS2764_TDM_CFG4_TX_FALLING BIT(0) + /* TDM Configuration Reg5 */ #define TAS2764_TDM_CFG5 TAS2764_REG(0X0, 0x0e) #define TAS2764_TDM_CFG5_VSNS_MASK BIT(6) diff --git a/sound/soc/codecs/tas2770.c b/sound/soc/codecs/tas2770.c index 8557759acb1f..e284a3a85459 100644 --- a/sound/soc/codecs/tas2770.c +++ b/sound/soc/codecs/tas2770.c @@ -508,7 +508,7 @@ static int tas2770_codec_probe(struct snd_soc_component *component) } static DECLARE_TLV_DB_SCALE(tas2770_digital_tlv, 1100, 50, 0); -static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -12750, 50, 0); +static DECLARE_TLV_DB_SCALE(tas2770_playback_volume, -10050, 50, 0); static const struct snd_kcontrol_new tas2770_snd_controls[] = { SOC_SINGLE_TLV("Speaker Playback Volume", TAS2770_PLAY_CFG_REG2, diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c index 0b5999c819db..04c50f9acda1 100644 --- a/sound/soc/codecs/wcd934x.c +++ b/sound/soc/codecs/wcd934x.c @@ -2281,7 +2281,7 @@ static irqreturn_t wcd934x_slim_irq_handler(int irq, void *data) { struct wcd934x_codec *wcd = data; unsigned long status = 0; - int i, j, port_id; + unsigned int i, j, port_id; unsigned int val, int_val = 0; irqreturn_t ret = IRQ_NONE; bool tx; diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c index 034a4e858c7e..602fa7cc2c5e 100644 --- a/sound/soc/codecs/wm0010.c +++ b/sound/soc/codecs/wm0010.c @@ -951,7 +951,7 @@ static int wm0010_spi_probe(struct spi_device *spi) if (ret) { dev_err(wm0010->dev, "Failed to set IRQ %d as wake source: %d\n", irq, ret); - return ret; + goto free_irq; } if (spi->max_speed_hz) @@ -963,9 +963,18 @@ static int wm0010_spi_probe(struct spi_device *spi) &soc_component_dev_wm0010, wm0010_dai, ARRAY_SIZE(wm0010_dai)); if (ret < 0) - return ret; + goto disable_irq_wake; return 0; + +disable_irq_wake: + irq_set_irq_wake(wm0010->irq, 0); + +free_irq: + if (wm0010->irq) + 
free_irq(wm0010->irq, wm0010); + + return ret; } static void wm0010_spi_remove(struct spi_device *spi) diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index e0b971620d0f..6db17349484c 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c @@ -302,7 +302,7 @@ static int wm5110_hp_pre_enable(struct snd_soc_dapm_widget *w) } else { wseq = wm5110_no_dre_left_enable; nregs = ARRAY_SIZE(wm5110_no_dre_left_enable); - priv->out_up_delay += 10; + priv->out_up_delay += 10000; } break; case ARIZONA_OUT1R_ENA_SHIFT: @@ -312,7 +312,7 @@ static int wm5110_hp_pre_enable(struct snd_soc_dapm_widget *w) } else { wseq = wm5110_no_dre_right_enable; nregs = ARRAY_SIZE(wm5110_no_dre_right_enable); - priv->out_up_delay += 10; + priv->out_up_delay += 10000; } break; default: @@ -338,7 +338,7 @@ static int wm5110_hp_pre_disable(struct snd_soc_dapm_widget *w) snd_soc_component_update_bits(component, ARIZONA_SPARE_TRIGGERS, ARIZONA_WS_TRG1, 0); - priv->out_down_delay += 27; + priv->out_down_delay += 27000; } break; case ARIZONA_OUT1R_ENA_SHIFT: @@ -350,7 +350,7 @@ static int wm5110_hp_pre_disable(struct snd_soc_dapm_widget *w) snd_soc_component_update_bits(component, ARIZONA_SPARE_TRIGGERS, ARIZONA_WS_TRG2, 0); - priv->out_down_delay += 27; + priv->out_down_delay += 27000; } break; default: diff --git a/sound/soc/fsl/fsl_audmix.c b/sound/soc/fsl/fsl_audmix.c index 672148dd4b23..acb499a5043c 100644 --- a/sound/soc/fsl/fsl_audmix.c +++ b/sound/soc/fsl/fsl_audmix.c @@ -492,11 +492,17 @@ static int fsl_audmix_probe(struct platform_device *pdev) goto err_disable_pm; } - priv->pdev = platform_device_register_data(dev, "imx-audmix", 0, NULL, 0); - if (IS_ERR(priv->pdev)) { - ret = PTR_ERR(priv->pdev); - dev_err(dev, "failed to register platform: %d\n", ret); - goto err_disable_pm; + /* + * If dais property exist, then register the imx-audmix card driver. + * otherwise, it should be linked by audio graph card. 
+ */ + if (of_find_property(pdev->dev.of_node, "dais", NULL)) { + priv->pdev = platform_device_register_data(dev, "imx-audmix", 0, NULL, 0); + if (IS_ERR(priv->pdev)) { + ret = PTR_ERR(priv->pdev); + dev_err(dev, "failed to register platform: %d\n", ret); + goto err_disable_pm; + } } return 0; diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c index 3a03f49452fa..8ee6f41563ea 100644 --- a/sound/soc/fsl/fsl_micfil.c +++ b/sound/soc/fsl/fsl_micfil.c @@ -123,6 +123,8 @@ static int micfil_set_quality(struct fsl_micfil *micfil) case QUALITY_VLOW2: qsel = MICFIL_QSEL_VLOW2_QUALITY; break; + default: + return -EINVAL; } return regmap_update_bits(micfil->regmap, REG_MICFIL_CTRL2, @@ -563,7 +565,7 @@ static irqreturn_t micfil_isr(int irq, void *devid) regmap_write_bits(micfil->regmap, REG_MICFIL_STAT, MICFIL_STAT_CHXF(i), - 1); + MICFIL_STAT_CHXF(i)); } for (i = 0; i < MICFIL_FIFO_NUM; i++) { @@ -598,7 +600,7 @@ static irqreturn_t micfil_err_isr(int irq, void *devid) if (stat_reg & MICFIL_STAT_LOWFREQF) { dev_dbg(&pdev->dev, "isr: ipg_clk_app is too low\n"); regmap_write_bits(micfil->regmap, REG_MICFIL_STAT, - MICFIL_STAT_LOWFREQF, 1); + MICFIL_STAT_LOWFREQF, MICFIL_STAT_LOWFREQF); } return IRQ_HANDLED; diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c index 9c1631c44422..11430f9f4996 100644 --- a/sound/soc/fsl/imx-card.c +++ b/sound/soc/fsl/imx-card.c @@ -517,7 +517,7 @@ static int imx_card_parse_of(struct imx_card_data *data) if (!card->dai_link) return -ENOMEM; - data->link_data = devm_kcalloc(dev, num_links, sizeof(*link), GFP_KERNEL); + data->link_data = devm_kcalloc(dev, num_links, sizeof(*link_data), GFP_KERNEL); if (!data->link_data) return -ENOMEM; @@ -759,6 +759,8 @@ static int imx_card_probe(struct platform_device *pdev) data->dapm_routes[i].sink = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s", i + 1, "Playback"); + if (!data->dapm_routes[i].sink) + return -ENOMEM; data->dapm_routes[i].source = "CPU-Playback"; } } @@ -776,6 +778,8 @@ static int imx_card_probe(struct platform_device *pdev) data->dapm_routes[i].source = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%d %s", i + 1, "Capture"); + if (!data->dapm_routes[i].source) + return -ENOMEM; data->dapm_routes[i].sink = "CPU-Capture"; } } diff --git a/sound/soc/generic/audio-graph-card2.c b/sound/soc/generic/audio-graph-card2.c index 8ac6df645ee6..33f35eaa76a8 100644 --- a/sound/soc/generic/audio-graph-card2.c +++ b/sound/soc/generic/audio-graph-card2.c @@ -249,16 +249,19 @@ static enum graph_type __graph_get_type(struct device_node *lnk) if (of_node_name_eq(np, GRAPH_NODENAME_MULTI)) { ret = GRAPH_MULTI; + fw_devlink_purge_absent_suppliers(&np->fwnode); goto out_put; } if (of_node_name_eq(np, GRAPH_NODENAME_DPCM)) { ret = GRAPH_DPCM; + fw_devlink_purge_absent_suppliers(&np->fwnode); goto out_put; } if (of_node_name_eq(np, GRAPH_NODENAME_C2C)) { ret = GRAPH_C2C; + fw_devlink_purge_absent_suppliers(&np->fwnode); goto out_put; } diff --git a/sound/soc/intel/atom/sst/sst_acpi.c b/sound/soc/intel/atom/sst/sst_acpi.c index 3be64430c256..83fbee88e02a 100644 --- a/sound/soc/intel/atom/sst/sst_acpi.c +++ b/sound/soc/intel/atom/sst/sst_acpi.c @@ -126,6 +126,28 @@ static const struct sst_res_info bytcr_res_info = { .acpi_ipc_irq_index = 0 }; +/* For "LPE0F28" ACPI device found on some Android factory OS models */ +static const struct sst_res_info lpe8086_res_info = { + .shim_offset = 0x140000, + .shim_size = 0x000100, + .shim_phy_addr = SST_BYT_SHIM_PHY_ADDR, + .ssp0_offset = 0xa0000, + .ssp0_size = 0x1000, + 
.dma0_offset = 0x98000, + .dma0_size = 0x4000, + .dma1_offset = 0x9c000, + .dma1_size = 0x4000, + .iram_offset = 0x0c0000, + .iram_size = 0x14000, + .dram_offset = 0x100000, + .dram_size = 0x28000, + .mbox_offset = 0x144000, + .mbox_size = 0x1000, + .acpi_lpe_res_index = 1, + .acpi_ddr_index = 0, + .acpi_ipc_irq_index = 0 +}; + static struct sst_platform_info byt_rvp_platform_data = { .probe_data = &byt_fwparse_info, .ipc_info = &byt_ipc_info, @@ -269,10 +291,38 @@ static int sst_acpi_probe(struct platform_device *pdev) mach->pdata = &chv_platform_data; pdata = mach->pdata; - ret = kstrtouint(id->id, 16, &dev_id); - if (ret < 0) { - dev_err(dev, "Unique device id conversion error: %d\n", ret); - return ret; + if (!strcmp(id->id, "LPE0F28")) { + struct resource *rsrc; + + /* Use regular BYT SST PCI VID:PID */ + dev_id = 0x80860F28; + byt_rvp_platform_data.res_info = &lpe8086_res_info; + + /* + * The "LPE0F28" ACPI device has separate IO-mem resources for: + * DDR, SHIM, MBOX, IRAM, DRAM, CFG + * None of which covers the entire LPE base address range. + * lpe8086_res_info.acpi_lpe_res_index points to the SHIM. + * Patch this to cover the entire base address range as expected + * by sst_platform_get_resources(). + */ + rsrc = platform_get_resource(pdev, IORESOURCE_MEM, + pdata->res_info->acpi_lpe_res_index); + if (!rsrc) { + dev_err(dev, "Invalid SHIM base\n"); + return -EIO; + } + rsrc->start -= pdata->res_info->shim_offset; + rsrc->end = rsrc->start + 0x200000 - 1; + } else { + ret = kstrtouint(id->id, 16, &dev_id); + if (ret < 0) { + dev_err(dev, "Unique device id conversion error: %d\n", ret); + return ret; + } + + if (soc_intel_is_byt_cr(pdev)) + byt_rvp_platform_data.res_info = &bytcr_res_info; } dev_dbg(dev, "ACPI device id: %x\n", dev_id); @@ -281,11 +331,6 @@ static int sst_acpi_probe(struct platform_device *pdev) if (ret < 0) return ret; - if (soc_intel_is_byt_cr(pdev)) { - /* override resource info */ - byt_rvp_platform_data.res_info = &bytcr_res_info; - } - /* update machine parameters */ mach->mach_params.acpi_ipc_irq_index = pdata->res_info->acpi_ipc_irq_index; @@ -346,6 +391,7 @@ static int sst_acpi_remove(struct platform_device *pdev) } static const struct acpi_device_id sst_acpi_ids[] = { + { "LPE0F28", (unsigned long)&snd_soc_acpi_intel_baytrail_machines}, { "80860F28", (unsigned long)&snd_soc_acpi_intel_baytrail_machines}, { "808622A8", (unsigned long)&snd_soc_acpi_intel_cherrytrail_machines}, { }, diff --git a/sound/soc/intel/avs/apl.c b/sound/soc/intel/avs/apl.c index f366478a875d..c4a0b9104151 100644 --- a/sound/soc/intel/avs/apl.c +++ b/sound/soc/intel/avs/apl.c @@ -112,7 +112,7 @@ static int apl_coredump(struct avs_dev *adev, union avs_notify_msg *msg) struct apl_log_buffer_layout layout; void __iomem *addr, *buf; size_t dump_size; - u16 offset = 0; + u32 offset = 0; u8 *dump, *pos; dump_size = AVS_FW_REGS_SIZE + msg->ext.coredump.stack_dump_size; diff --git a/sound/soc/intel/avs/pcm.c b/sound/soc/intel/avs/pcm.c index 22f21f3f612d..07428b5755b8 100644 --- a/sound/soc/intel/avs/pcm.c +++ b/sound/soc/intel/avs/pcm.c @@ -540,7 +540,7 @@ static int avs_dai_fe_hw_free(struct snd_pcm_substream *substream, struct snd_so if (ret < 0) dev_dbg(dai->dev, "Failed to free pages!\n"); - return ret; + return 0; } static int avs_dai_fe_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c index ff879e173d51..b00a9fdd7a9c 100644 --- a/sound/soc/intel/boards/bytcr_rt5640.c +++ 
b/sound/soc/intel/boards/bytcr_rt5640.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -32,6 +33,8 @@ #include "../atom/sst-atom-controls.h" #include "../common/soc-intel-quirks.h" +#define BYT_RT5640_FALLBACK_CODEC_DEV_NAME "i2c-rt5640" + enum { BYT_RT5640_DMIC1_MAP, BYT_RT5640_DMIC2_MAP, @@ -573,6 +576,19 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF2 | BYT_RT5640_MCLK_EN), }, + { /* Acer Aspire SW3-013 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW3-013"), + }, + .driver_data = (void *)(BYT_RT5640_DMIC1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_DIFF_MIC | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -1119,6 +1135,36 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = { BYT_RT5640_SSP0_AIF2 | BYT_RT5640_MCLK_EN), }, + { + /* Vexia Edu Atla 10 tablet 5V version */ + .matches = { + /* Having all 3 of these not set is somewhat unique */ + DMI_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."), + DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."), + DMI_MATCH(DMI_BOARD_NAME, "To be filled by O.E.M."), + /* Above strings are too generic, also match on BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "05/14/2015"), + }, + .driver_data = (void *)(BYTCR_INPUT_DEFAULTS | + BYT_RT5640_JD_NOT_INV | + BYT_RT5640_SSP0_AIF1 | + BYT_RT5640_MCLK_EN), + }, + { /* Vexia Edu Atla 10 tablet 9V version */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"), + /* Above strings are too generic, also match on BIOS date */ + DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"), + }, + .driver_data = (void *)(BYT_RT5640_IN1_MAP | + BYT_RT5640_JD_SRC_JD2_IN4N | + BYT_RT5640_OVCD_TH_2000UA | + BYT_RT5640_OVCD_SF_0P75 | + BYT_RT5640_DIFF_MIC | + BYT_RT5640_SSP0_AIF2 | + BYT_RT5640_MCLK_EN), + }, { /* Voyo Winpad A15 */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), @@ -1687,9 +1733,33 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev) codec_dev = acpi_get_first_physical_node(adev); acpi_dev_put(adev); - if (!codec_dev) - return -EPROBE_DEFER; - priv->codec_dev = get_device(codec_dev); + + if (codec_dev) { + priv->codec_dev = get_device(codec_dev); + } else { + /* + * Special case for Android tablets where the codec i2c_client + * has been manually instantiated by x86_android_tablets.ko due + * to a broken DSDT. 
+ */ + codec_dev = bus_find_device_by_name(&i2c_bus_type, NULL, + BYT_RT5640_FALLBACK_CODEC_DEV_NAME); + if (!codec_dev) + return -EPROBE_DEFER; + + if (!i2c_verify_client(codec_dev)) { + dev_err(dev, "Error '%s' is not an i2c_client\n", + BYT_RT5640_FALLBACK_CODEC_DEV_NAME); + put_device(codec_dev); + } + + /* fixup codec name */ + strscpy(byt_rt5640_codec_name, BYT_RT5640_FALLBACK_CODEC_DEV_NAME, + sizeof(byt_rt5640_codec_name)); + + /* bus_find_device() returns a reference no need to get() */ + priv->codec_dev = codec_dev; + } /* * swap SSP0 if bytcr is detected diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index d03de37e3578..9cd85ab19c55 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ -267,6 +267,15 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { SOF_BT_OFFLOAD_SSP(2) | SOF_SSP_BT_OFFLOAD_PRESENT), }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_PRODUCT_SKU, "0000000000070000"), + }, + .driver_data = (void *)(SOF_SDW_TGL_HDMI | + RT711_JD2_100K), + }, { .callback = sof_sdw_quirk_cb, .matches = { @@ -378,6 +387,15 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = { RT711_JD2 | SOF_SDW_FOUR_SPK), }, + { + .callback = sof_sdw_quirk_cb, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"), + DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B8C"), + }, + .driver_data = (void *)(SOF_SDW_TGL_HDMI | + RT711_JD2), + }, { .callback = sof_sdw_quirk_cb, .matches = { diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c index 01501d5747a7..52495c930ca3 100644 --- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c +++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c @@ -120,8 +120,8 @@ int mtk_afe_pcm_new(struct snd_soc_component *component, struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component); size = afe->mtk_afe_hardware->buffer_bytes_max; - snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, - afe->dev, size, size); + snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV, afe->dev, 0, size); + return 0; } EXPORT_SYMBOL_GPL(mtk_afe_pcm_new); diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h index dd78600fc7b0..b96116ea8116 100644 --- a/sound/soc/qcom/lpass.h +++ b/sound/soc/qcom/lpass.h @@ -13,10 +13,11 @@ #include #include #include +#include #include "lpass-hdmi.h" #define LPASS_AHBIX_CLOCK_FREQUENCY 131072000 -#define LPASS_MAX_PORTS (LPASS_CDC_DMA_VA_TX8 + 1) +#define LPASS_MAX_PORTS (DISPLAY_PORT_RX_7 + 1) #define LPASS_MAX_MI2S_PORTS (8) #define LPASS_MAX_DMA_CHANNELS (8) #define LPASS_MAX_HDMI_DMA_CHANNELS (4) diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c index 8bb7452b8f18..8fe38f842e7a 100644 --- a/sound/soc/qcom/qdsp6/q6afe-dai.c +++ b/sound/soc/qcom/qdsp6/q6afe-dai.c @@ -496,7 +496,7 @@ static int q6afe_mi2s_set_sysclk(struct snd_soc_dai *dai, static const struct snd_soc_dapm_route q6afe_dapm_routes[] = { {"HDMI Playback", NULL, "HDMI_RX"}, - {"Display Port Playback", NULL, "DISPLAY_PORT_RX"}, + {"DISPLAY_PORT_RX_0 Playback", NULL, "DISPLAY_PORT_RX"}, {"Slimbus Playback", NULL, "SLIMBUS_0_RX"}, {"Slimbus1 Playback", NULL, "SLIMBUS_1_RX"}, {"Slimbus2 Playback", NULL, "SLIMBUS_2_RX"}, diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c index 7f02f5b2c33f..de99c6920df8 100644 --- a/sound/soc/qcom/qdsp6/q6apm-dai.c +++ b/sound/soc/qcom/qdsp6/q6apm-dai.c @@ -24,8 
+24,8 @@ #define PLAYBACK_MIN_PERIOD_SIZE 128 #define CAPTURE_MIN_NUM_PERIODS 2 #define CAPTURE_MAX_NUM_PERIODS 8 -#define CAPTURE_MAX_PERIOD_SIZE 4096 -#define CAPTURE_MIN_PERIOD_SIZE 320 +#define CAPTURE_MAX_PERIOD_SIZE 65536 +#define CAPTURE_MIN_PERIOD_SIZE 6144 #define BUFFER_BYTES_MAX (PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE) #define BUFFER_BYTES_MIN (PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE) #define SID_MASK_DEFAULT 0xF @@ -292,13 +292,14 @@ static int q6apm_dai_open(struct snd_soc_component *component, } } - ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 32); + /* setup 10ms latency to accommodate DSP restrictions */ + ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 480); if (ret < 0) { dev_err(dev, "constraint for period bytes step ret = %d\n", ret); goto err; } - ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 32); + ret = snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 480); if (ret < 0) { dev_err(dev, "constraint for buffer bytes step ret = %d\n", ret); goto err; diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c index 5fc8088e63c8..9d314f412abc 100644 --- a/sound/soc/qcom/qdsp6/q6asm-dai.c +++ b/sound/soc/qcom/qdsp6/q6asm-dai.c @@ -902,9 +902,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component, if (ret < 0) { dev_err(dev, "q6asm_open_write failed\n"); - q6asm_audio_client_free(prtd->audio_client); - prtd->audio_client = NULL; - return ret; + goto open_err; } } @@ -913,7 +911,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component, prtd->session_id, dir); if (ret) { dev_err(dev, "Stream reg failed ret:%d\n", ret); - return ret; + goto q6_err; } ret = __q6asm_dai_compr_set_codec_params(component, stream, @@ -921,7 +919,7 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component, prtd->stream_id); if (ret) { dev_err(dev, "codec param setup failed ret:%d\n", ret); - return ret; + goto q6_err; } ret = q6asm_map_memory_regions(dir, prtd->audio_client, prtd->phys, @@ -930,12 +928,21 @@ static int q6asm_dai_compr_set_params(struct snd_soc_component *component, if (ret < 0) { dev_err(dev, "Buffer Mapping failed ret:%d\n", ret); - return -ENOMEM; + ret = -ENOMEM; + goto q6_err; } prtd->state = Q6ASM_STREAM_RUNNING; return 0; + +q6_err: + q6asm_cmd(prtd->audio_client, prtd->stream_id, CMD_CLOSE); + +open_err: + q6asm_audio_client_free(prtd->audio_client); + prtd->audio_client = NULL; + return ret; } static int q6asm_dai_compr_set_metadata(struct snd_soc_component *component, diff --git a/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.c b/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.c index f67c16fd90b9..ac937a6bf909 100644 --- a/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.c +++ b/sound/soc/qcom/qdsp6/q6dsp-lpass-ports.c @@ -79,6 +79,22 @@ .id = did, \ } +#define Q6AFE_DP_RX_DAI(did) { \ + .playback = { \ + .stream_name = #did" Playback", \ + .rates = SNDRV_PCM_RATE_48000 | \ + SNDRV_PCM_RATE_96000 | \ + SNDRV_PCM_RATE_192000, \ + .formats = SNDRV_PCM_FMTBIT_S16_LE | \ + SNDRV_PCM_FMTBIT_S24_LE, \ + .channels_min = 2, \ + .channels_max = 8, \ + .rate_min = 48000, \ + .rate_max = 192000, \ + }, \ + .name = #did, \ + .id = did, \ + } static struct snd_soc_dai_driver q6dsp_audio_fe_dais[] = { { @@ -528,22 +544,14 @@ static struct snd_soc_dai_driver q6dsp_audio_fe_dais[] = { Q6AFE_TDM_CAP_DAI("Quinary", 5, QUINARY_TDM_TX_5), Q6AFE_TDM_CAP_DAI("Quinary", 6, QUINARY_TDM_TX_6), 
Q6AFE_TDM_CAP_DAI("Quinary", 7, QUINARY_TDM_TX_7), - { - .playback = { - .stream_name = "Display Port Playback", - .rates = SNDRV_PCM_RATE_48000 | - SNDRV_PCM_RATE_96000 | - SNDRV_PCM_RATE_192000, - .formats = SNDRV_PCM_FMTBIT_S16_LE | - SNDRV_PCM_FMTBIT_S24_LE, - .channels_min = 2, - .channels_max = 8, - .rate_max = 192000, - .rate_min = 48000, - }, - .id = DISPLAY_PORT_RX, - .name = "DISPLAY_PORT", - }, + Q6AFE_DP_RX_DAI(DISPLAY_PORT_RX_0), + Q6AFE_DP_RX_DAI(DISPLAY_PORT_RX_1), + Q6AFE_DP_RX_DAI(DISPLAY_PORT_RX_2), + Q6AFE_DP_RX_DAI(DISPLAY_PORT_RX_3), + Q6AFE_DP_RX_DAI(DISPLAY_PORT_RX_4), + Q6AFE_DP_RX_DAI(DISPLAY_PORT_RX_5), + Q6AFE_DP_RX_DAI(DISPLAY_PORT_RX_6), + Q6AFE_DP_RX_DAI(DISPLAY_PORT_RX_7), Q6AFE_CDC_DMA_RX_DAI(WSA_CODEC_DMA_RX_0), Q6AFE_CDC_DMA_TX_DAI(WSA_CODEC_DMA_TX_0), Q6AFE_CDC_DMA_RX_DAI(WSA_CODEC_DMA_RX_1), @@ -603,6 +611,9 @@ struct snd_soc_dai_driver *q6dsp_audio_ports_set_config(struct device *dev, case DISPLAY_PORT_RX: q6dsp_audio_fe_dais[i].ops = cfg->q6hdmi_ops; break; + case DISPLAY_PORT_RX_1 ... DISPLAY_PORT_RX_7: + q6dsp_audio_fe_dais[i].ops = cfg->q6hdmi_ops; + break; case SLIMBUS_0_RX ... SLIMBUS_6_TX: q6dsp_audio_fe_dais[i].ops = cfg->q6slim_ops; break; diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c index 41be09a07ca7..65e51b6b46ff 100644 --- a/sound/soc/qcom/sm8250.c +++ b/sound/soc/qcom/sm8250.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -39,9 +40,11 @@ static int sm8250_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd, SNDRV_PCM_HW_PARAM_RATE); struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); rate->min = rate->max = 48000; channels->min = channels->max = 2; + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); return 0; } diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig index 2a61e620cd3b..a529852144f9 100644 --- a/sound/soc/samsung/Kconfig +++ b/sound/soc/samsung/Kconfig @@ -220,8 +220,9 @@ config SND_SOC_SAMSUNG_TM2_WM5110 config SND_SOC_SAMSUNG_ARIES_WM8994 tristate "SoC I2S Audio support for WM8994 on Aries" - depends on SND_SOC_SAMSUNG && MFD_WM8994 && IIO && EXTCON + depends on SND_SOC_SAMSUNG && I2C && IIO && EXTCON select SND_SOC_BT_SCO + select MFD_WM8994 select SND_SOC_WM8994 select SND_SAMSUNG_I2S help @@ -233,8 +234,9 @@ config SND_SOC_SAMSUNG_ARIES_WM8994 config SND_SOC_SAMSUNG_MIDAS_WM1811 tristate "SoC I2S Audio support for Midas boards" - depends on SND_SOC_SAMSUNG + depends on SND_SOC_SAMSUNG && I2C select SND_SAMSUNG_I2S + select MFD_WM8994 select SND_SOC_WM8994 help Say Y if you want to add support for SoC audio on the Midas boards. 
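Note on the sm8250.c hunk just above: a back-end hw_params fixup is the standard ASoC hook for pinning the DSP-facing stream parameters, and the patch extends it to force S16_LE alongside the existing 48 kHz / 2-channel constraint. The sketch below restates that pattern in isolation for a hypothetical machine driver; the helpers (hw_param_interval(), hw_param_mask(), snd_mask_set_format()) are the same ones used in the hunk.

	/* Illustrative back-end fixup (hypothetical driver): force the
	 * back-end DAI to 48 kHz, stereo, 16-bit regardless of what the
	 * front end negotiated. */
	static int example_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
					      struct snd_pcm_hw_params *params)
	{
		struct snd_interval *rate = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_RATE);
		struct snd_interval *channels = hw_param_interval(params,
						SNDRV_PCM_HW_PARAM_CHANNELS);
		struct snd_mask *fmt = hw_param_mask(params,
						SNDRV_PCM_HW_PARAM_FORMAT);

		rate->min = rate->max = 48000;
		channels->min = channels->max = 2;
		snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);

		return 0;
	}

Such a callback is attached via the card's dai_link->be_hw_params_fixup pointer; without the format line a front end could still negotiate a wider sample format than the back end supports, which is the mismatch the snd_mask_set_format() addition appears to guard against.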
diff --git a/sound/soc/samsung/midas_wm1811.c b/sound/soc/samsung/midas_wm1811.c index 6931b9a45b3e..126098fdcf1b 100644 --- a/sound/soc/samsung/midas_wm1811.c +++ b/sound/soc/samsung/midas_wm1811.c @@ -38,6 +38,17 @@ struct midas_priv { struct snd_soc_jack headset_jack; }; +static struct snd_soc_jack_pin headset_jack_pins[] = { + { + .pin = "Headphone", + .mask = SND_JACK_HEADPHONE, + }, + { + .pin = "Headset Mic", + .mask = SND_JACK_MICROPHONE, + }, +}; + static int midas_start_fll1(struct snd_soc_pcm_runtime *rtd, unsigned int rate) { struct snd_soc_card *card = rtd->card; @@ -261,6 +272,7 @@ static const struct snd_soc_dapm_widget midas_dapm_widgets[] = { SND_SOC_DAPM_LINE("HDMI", NULL), SND_SOC_DAPM_LINE("FM In", midas_fm_set), + SND_SOC_DAPM_HP("Headphone", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), SND_SOC_DAPM_MIC("Main Mic", midas_mic_bias), SND_SOC_DAPM_MIC("Sub Mic", midas_submic_bias), @@ -305,11 +317,13 @@ static int midas_late_probe(struct snd_soc_card *card) return ret; } - ret = snd_soc_card_jack_new(card, "Headset", - SND_JACK_HEADSET | SND_JACK_MECHANICAL | - SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | - SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5, - &priv->headset_jack); + ret = snd_soc_card_jack_new_pins(card, "Headset", + SND_JACK_HEADSET | SND_JACK_MECHANICAL | + SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | + SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5, + &priv->headset_jack, + headset_jack_pins, + ARRAY_SIZE(headset_jack_pins)); if (ret) return ret; diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 7e380d71b0f8..0964b4e3fbdf 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c @@ -1694,20 +1694,6 @@ int rsnd_kctrl_accept_anytime(struct rsnd_dai_stream *io) return 1; } -int rsnd_kctrl_accept_runtime(struct rsnd_dai_stream *io) -{ - struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); - struct rsnd_priv *priv = rsnd_io_to_priv(io); - struct device *dev = rsnd_priv_to_dev(priv); - - if (!runtime) { - dev_warn(dev, "Can't update kctrl when idle\n"); - return 0; - } - - return 1; -} - struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg) { cfg->cfg.val = cfg->val; diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h index f8ef6836ef84..690f4932357c 100644 --- a/sound/soc/sh/rcar/rsnd.h +++ b/sound/soc/sh/rcar/rsnd.h @@ -742,7 +742,6 @@ struct rsnd_kctrl_cfg_s { #define rsnd_kctrl_vals(x) ((x).val) /* = (x).cfg.val[0] */ int rsnd_kctrl_accept_anytime(struct rsnd_dai_stream *io); -int rsnd_kctrl_accept_runtime(struct rsnd_dai_stream *io); struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg); struct rsnd_kctrl_cfg *rsnd_kctrl_init_s(struct rsnd_kctrl_cfg_s *cfg); int rsnd_kctrl_new(struct rsnd_mod *mod, diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c index f832165e46bc..e985681363e2 100644 --- a/sound/soc/sh/rcar/src.c +++ b/sound/soc/sh/rcar/src.c @@ -34,6 +34,7 @@ struct rsnd_src { struct rsnd_mod *dma; struct rsnd_kctrl_cfg_s sen; /* sync convert enable */ struct rsnd_kctrl_cfg_s sync; /* sync convert */ + u32 current_sync_rate; int irq; }; @@ -99,7 +100,7 @@ static u32 rsnd_src_convert_rate(struct rsnd_dai_stream *io, if (!rsnd_src_sync_is_enabled(mod)) return rsnd_io_converted_rate(io); - convert_rate = src->sync.val; + convert_rate = src->current_sync_rate; if (!convert_rate) convert_rate = rsnd_io_converted_rate(io); @@ -200,13 +201,73 @@ static const u32 chan222222[] = { static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, 
struct rsnd_mod *mod) { + struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); + struct rsnd_priv *priv = rsnd_mod_to_priv(mod); + struct rsnd_src *src = rsnd_mod_to_src(mod); + u32 fin, fout, new_rate; + int inc, cnt, rate; + u64 base, val; + + if (!runtime) + return; + + if (!rsnd_src_sync_is_enabled(mod)) + return; + + fin = rsnd_src_get_in_rate(priv, io); + fout = rsnd_src_get_out_rate(priv, io); + + new_rate = src->sync.val; + + if (!new_rate) + new_rate = fout; + + /* Do nothing if no diff */ + if (new_rate == src->current_sync_rate) + return; + + /* + * SRCm_IFSVR::INTIFS can change within 1% + * see + * SRCm_IFSVR::INTIFS Note + */ + inc = fout / 100; + cnt = abs(new_rate - fout) / inc; + if (fout > new_rate) + inc *= -1; + + /* + * After start running SRC, we can update only SRC_IFSVR + * for Synchronous Mode + */ + base = (u64)0x0400000 * fin; + rate = fout; + for (int i = 0; i < cnt; i++) { + val = base; + rate += inc; + do_div(val, rate); + + rsnd_mod_write(mod, SRC_IFSVR, val); + } + val = base; + do_div(val, new_rate); + + rsnd_mod_write(mod, SRC_IFSVR, val); + + /* update current_sync_rate */ + src->current_sync_rate = new_rate; +} + +static void rsnd_src_init_convert_rate(struct rsnd_dai_stream *io, + struct rsnd_mod *mod) +{ + struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); struct rsnd_priv *priv = rsnd_mod_to_priv(mod); struct device *dev = rsnd_priv_to_dev(priv); - struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); int is_play = rsnd_io_is_play(io); int use_src = 0; u32 fin, fout; - u32 ifscr, fsrate, adinr; + u32 ifscr, adinr; u32 cr, route; u32 i_busif, o_busif, tmp; const u32 *bsdsr_table; @@ -244,26 +305,15 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, adinr = rsnd_get_adinr_bit(mod, io) | chan; /* - * SRC_IFSCR / SRC_IFSVR - */ - ifscr = 0; - fsrate = 0; - if (use_src) { - u64 n; - - ifscr = 1; - n = (u64)0x0400000 * fin; - do_div(n, fout); - fsrate = n; - } - - /* + * SRC_IFSCR * SRC_SRCCR / SRC_ROUTE_MODE0 */ + ifscr = 0; cr = 0x00011110; route = 0x0; if (use_src) { route = 0x1; + ifscr = 0x1; if (rsnd_src_sync_is_enabled(mod)) { cr |= 0x1; @@ -334,7 +384,6 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, rsnd_mod_write(mod, SRC_SRCIR, 1); /* initialize */ rsnd_mod_write(mod, SRC_ADINR, adinr); rsnd_mod_write(mod, SRC_IFSCR, ifscr); - rsnd_mod_write(mod, SRC_IFSVR, fsrate); rsnd_mod_write(mod, SRC_SRCCR, cr); rsnd_mod_write(mod, SRC_BSDSR, bsdsr_table[idx]); rsnd_mod_write(mod, SRC_BSISR, bsisr_table[idx]); @@ -347,6 +396,9 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io, rsnd_adg_set_src_timesel_gen2(mod, io, fin, fout); + /* update SRC_IFSVR */ + rsnd_src_set_convert_rate(io, mod); + return; convert_rate_err: @@ -466,7 +518,8 @@ static int rsnd_src_init(struct rsnd_mod *mod, int ret; /* reset sync convert_rate */ - src->sync.val = 0; + src->sync.val = + src->current_sync_rate = 0; ret = rsnd_mod_power_on(mod); if (ret < 0) @@ -474,7 +527,7 @@ static int rsnd_src_init(struct rsnd_mod *mod, rsnd_src_activation(mod); - rsnd_src_set_convert_rate(io, mod); + rsnd_src_init_convert_rate(io, mod); rsnd_src_status_clear(mod); @@ -492,7 +545,8 @@ static int rsnd_src_quit(struct rsnd_mod *mod, rsnd_mod_power_off(mod); /* reset sync convert_rate */ - src->sync.val = 0; + src->sync.val = + src->current_sync_rate = 0; return 0; } @@ -530,6 +584,22 @@ static irqreturn_t rsnd_src_interrupt(int irq, void *data) return IRQ_HANDLED; } +static int rsnd_src_kctrl_accept_runtime(struct 
rsnd_dai_stream *io) +{ + struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io); + + if (!runtime) { + struct rsnd_priv *priv = rsnd_io_to_priv(io); + struct device *dev = rsnd_priv_to_dev(priv); + + dev_warn(dev, "\"SRC Out Rate\" can use during running\n"); + + return 0; + } + + return 1; +} + static int rsnd_src_probe_(struct rsnd_mod *mod, struct rsnd_dai_stream *io, struct rsnd_priv *priv) @@ -584,7 +654,7 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod, "SRC Out Rate Switch" : "SRC In Rate Switch", rsnd_kctrl_accept_anytime, - rsnd_src_set_convert_rate, + rsnd_src_init_convert_rate, &src->sen, 1); if (ret < 0) return ret; @@ -593,7 +663,7 @@ static int rsnd_src_pcm_new(struct rsnd_mod *mod, rsnd_io_is_play(io) ? "SRC Out Rate" : "SRC In Rate", - rsnd_kctrl_accept_runtime, + rsnd_src_kctrl_accept_runtime, rsnd_src_set_convert_rate, &src->sync, 192000); diff --git a/sound/soc/sh/rz-ssi.c b/sound/soc/sh/rz-ssi.c index 5d6bae33ae34..12e063c29a2a 100644 --- a/sound/soc/sh/rz-ssi.c +++ b/sound/soc/sh/rz-ssi.c @@ -244,8 +244,7 @@ static void rz_ssi_stream_quit(struct rz_ssi_priv *ssi, static int rz_ssi_clk_setup(struct rz_ssi_priv *ssi, unsigned int rate, unsigned int channels) { - static s8 ckdv[16] = { 1, 2, 4, 8, 16, 32, 64, 128, - 6, 12, 24, 48, 96, -1, -1, -1 }; + static u8 ckdv[] = { 1, 2, 4, 8, 16, 32, 64, 128, 6, 12, 24, 48, 96 }; unsigned int channel_bits = 32; /* System Word Length */ unsigned long bclk_rate = rate * channels * channel_bits; unsigned int div; @@ -484,6 +483,8 @@ static int rz_ssi_pio_send(struct rz_ssi_priv *ssi, struct rz_ssi_stream *strm) sample_space = strm->fifo_sample_size; ssifsr = rz_ssi_reg_readl(ssi, SSIFSR); sample_space -= (ssifsr >> SSIFSR_TDC_SHIFT) & SSIFSR_TDC_MASK; + if (sample_space < 0) + return -EINVAL; /* Only add full frames at a time */ while (frames_left && (sample_space >= runtime->channels)) { diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index 6a4101dc15a5..b13370d2ec1d 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2837,7 +2837,7 @@ int snd_soc_of_parse_pin_switches(struct snd_soc_card *card, const char *prop) unsigned int i, nb_controls; int ret; - if (!of_property_read_bool(dev->of_node, prop)) + if (!of_property_present(dev->of_node, prop)) return 0; strings = devm_kcalloc(dev, nb_controls_max, @@ -2911,23 +2911,17 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np, if (rx_mask) snd_soc_of_get_slot_mask(np, "dai-tdm-slot-rx-mask", rx_mask); - if (of_property_read_bool(np, "dai-tdm-slot-num")) { - ret = of_property_read_u32(np, "dai-tdm-slot-num", &val); - if (ret) - return ret; + ret = of_property_read_u32(np, "dai-tdm-slot-num", &val); + if (ret && ret != -EINVAL) + return ret; + if (!ret && slots) + *slots = val; - if (slots) - *slots = val; - } - - if (of_property_read_bool(np, "dai-tdm-slot-width")) { - ret = of_property_read_u32(np, "dai-tdm-slot-width", &val); - if (ret) - return ret; - - if (slot_width) - *slot_width = val; - } + ret = of_property_read_u32(np, "dai-tdm-slot-width", &val); + if (ret && ret != -EINVAL) + return ret; + if (!ret && slot_width) + *slot_width = val; return 0; } @@ -3143,10 +3137,10 @@ unsigned int snd_soc_daifmt_parse_format(struct device_node *np, * SND_SOC_DAIFMT_INV_MASK area */ snprintf(prop, sizeof(prop), "%sbitclock-inversion", prefix); - bit = !!of_get_property(np, prop, NULL); + bit = of_property_read_bool(np, prop); snprintf(prop, sizeof(prop), "%sframe-inversion", prefix); - frame = !!of_get_property(np, prop, NULL); + frame = 
of_property_read_bool(np, prop); switch ((bit << 4) + frame) { case 0x11: @@ -3183,12 +3177,12 @@ unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np, * check "[prefix]frame-master" */ snprintf(prop, sizeof(prop), "%sbitclock-master", prefix); - bit = !!of_get_property(np, prop, NULL); + bit = of_property_present(np, prop); if (bit && bitclkmaster) *bitclkmaster = of_parse_phandle(np, prop, 0); snprintf(prop, sizeof(prop), "%sframe-master", prefix); - frame = !!of_get_property(np, prop, NULL); + frame = of_property_present(np, prop); if (frame && framemaster) *framemaster = of_parse_phandle(np, prop, 0); diff --git a/sound/soc/soc-dai.c b/sound/soc/soc-dai.c index 49752af0e205..ba38b6e6b264 100644 --- a/sound/soc/soc-dai.c +++ b/sound/soc/soc-dai.c @@ -270,10 +270,11 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, if (dai->driver->ops && dai->driver->ops->xlate_tdm_slot_mask) - dai->driver->ops->xlate_tdm_slot_mask(slots, - &tx_mask, &rx_mask); + ret = dai->driver->ops->xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask); else - snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask); + ret = snd_soc_xlate_tdm_slot_mask(slots, &tx_mask, &rx_mask); + if (ret) + goto err; dai->tx_mask = tx_mask; dai->rx_mask = rx_mask; @@ -282,6 +283,7 @@ int snd_soc_dai_set_tdm_slot(struct snd_soc_dai *dai, dai->driver->ops->set_tdm_slot) ret = dai->driver->ops->set_tdm_slot(dai, tx_mask, rx_mask, slots, slot_width); +err: return soc_dai_ret(dai, ret); } EXPORT_SYMBOL_GPL(snd_soc_dai_set_tdm_slot); diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index b27e89ff6a16..eff1355cc3df 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -336,7 +336,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, if (ucontrol->value.integer.value[0] < 0) return -EINVAL; val = ucontrol->value.integer.value[0]; - if (mc->platform_max && ((int)val + min) > mc->platform_max) + if (mc->platform_max && val > mc->platform_max) return -EINVAL; if (val > max - min) return -EINVAL; @@ -349,7 +349,7 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, if (ucontrol->value.integer.value[1] < 0) return -EINVAL; val2 = ucontrol->value.integer.value[1]; - if (mc->platform_max && ((int)val2 + min) > mc->platform_max) + if (mc->platform_max && val2 > mc->platform_max) return -EINVAL; if (val2 > max - min) return -EINVAL; @@ -502,17 +502,16 @@ int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol, { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; - int platform_max; - int min = mc->min; + int max; - if (!mc->platform_max) - mc->platform_max = mc->max; - platform_max = mc->platform_max; + max = mc->max - mc->min; + if (mc->platform_max && mc->platform_max < max) + max = mc->platform_max; uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = snd_soc_volsw_is_stereo(mc) ? 
2 : 1; uinfo->value.integer.min = 0; - uinfo->value.integer.max = platform_max - min; + uinfo->value.integer.max = max; return 0; } @@ -639,6 +638,33 @@ int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol, } EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range); +static int snd_soc_clip_to_platform_max(struct snd_kcontrol *kctl) +{ + struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value; + struct snd_ctl_elem_value uctl; + int ret; + + if (!mc->platform_max) + return 0; + + ret = kctl->get(kctl, &uctl); + if (ret < 0) + return ret; + + if (uctl.value.integer.value[0] > mc->platform_max) + uctl.value.integer.value[0] = mc->platform_max; + + if (snd_soc_volsw_is_stereo(mc) && + uctl.value.integer.value[1] > mc->platform_max) + uctl.value.integer.value[1] = mc->platform_max; + + ret = kctl->put(kctl, &uctl); + if (ret < 0) + return ret; + + return 0; +} + /** * snd_soc_limit_volume - Set new limit to an existing volume control. * @@ -663,7 +689,7 @@ int snd_soc_limit_volume(struct snd_soc_card *card, struct soc_mixer_control *mc = (struct soc_mixer_control *)kctl->private_value; if (max <= mc->max - mc->min) { mc->platform_max = max; - ret = 0; + ret = snd_soc_clip_to_platform_max(kctl); } } return ret; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index f3964060a044..5a0fec90ae25 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -906,7 +906,13 @@ static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd, snd_soc_dai_digital_mute(dai, 0, substream->stream); out: - return soc_pcm_ret(rtd, ret); + /* + * Don't use soc_pcm_ret() on .prepare callback to lower error log severity + * + * We don't want to log an error since we do not want to give userspace a way to do a + * denial-of-service attack on the syslog / diskspace. + */ + return ret; } /* PCM prepare ops for non-DPCM streams */ @@ -918,6 +924,13 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream) snd_soc_dpcm_mutex_lock(rtd); ret = __soc_pcm_prepare(rtd, substream); snd_soc_dpcm_mutex_unlock(rtd); + + /* + * Don't use soc_pcm_ret() on .prepare callback to lower error log severity + * + * We don't want to log an error since we do not want to give userspace a way to do a + * denial-of-service attack on the syslog / diskspace. + */ return ret; } @@ -1486,10 +1499,13 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream, /* * Filter for systems with 'component_chaining' enabled. * This helps to avoid unnecessary re-configuration of an - * already active BE on such systems. + * already active BE on such systems and ensures the BE DAI + * widget is powered ON after hw_params() BE DAI callback. */ if (fe->card->component_chaining && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_NEW) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE)) continue; @@ -2422,7 +2438,13 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream) be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE; } - return soc_pcm_ret(fe, ret); + /* + * Don't use soc_pcm_ret() on .prepare callback to lower error log severity + * + * We don't want to log an error since we do not want to give userspace a way to do a + * denial-of-service attack on the syslog / diskspace. 
+ */ + return ret; } static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream) @@ -2459,7 +2481,13 @@ out: dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO); snd_soc_dpcm_mutex_unlock(fe); - return soc_pcm_ret(fe, ret); + /* + * Don't use soc_pcm_ret() on .prepare callback to lower error log severity + * + * We don't want to log an error since we do not want to give userspace a way to do a + * denial-of-service attack on the syslog / diskspace. + */ + return ret; } static int dpcm_run_update_shutdown(struct snd_soc_pcm_runtime *fe, int stream) diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c index a0dfd7de431f..a75f81116643 100644 --- a/sound/soc/sof/intel/hda-codec.c +++ b/sound/soc/sof/intel/hda-codec.c @@ -282,6 +282,7 @@ int hda_codec_i915_exit(struct snd_sof_dev *sdev) } EXPORT_SYMBOL_NS(hda_codec_i915_exit, SND_SOC_SOF_HDA_AUDIO_CODEC_I915); +MODULE_SOFTDEP("pre: snd-hda-codec-hdmi"); #endif MODULE_LICENSE("Dual BSD/GPL"); diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c index eb31b49e6597..0629aa5f2fe4 100644 --- a/sound/soc/stm/stm32_sai_sub.c +++ b/sound/soc/stm/stm32_sai_sub.c @@ -317,7 +317,7 @@ static int stm32_sai_get_clk_div(struct stm32_sai_sub_data *sai, int div; div = DIV_ROUND_CLOSEST(input_rate, output_rate); - if (div > SAI_XCR1_MCKDIV_MAX(version)) { + if (div > SAI_XCR1_MCKDIV_MAX(version) || div <= 0) { dev_err(&sai->pdev->dev, "Divider %d out of range\n", div); return -EINVAL; } @@ -378,8 +378,8 @@ static long stm32_sai_mclk_round_rate(struct clk_hw *hw, unsigned long rate, int div; div = stm32_sai_get_clk_div(sai, *prate, rate); - if (div < 0) - return div; + if (div <= 0) + return -EINVAL; mclk->freq = *prate / div; diff --git a/sound/soc/sunxi/sun4i-codec.c b/sound/soc/sunxi/sun4i-codec.c index 835dc3404367..1e310958f8c0 100644 --- a/sound/soc/sunxi/sun4i-codec.c +++ b/sound/soc/sunxi/sun4i-codec.c @@ -25,6 +25,7 @@ #include #include +#include #include #include #include @@ -239,6 +240,7 @@ struct sun4i_codec { struct clk *clk_module; struct reset_control *rst; struct gpio_desc *gpio_pa; + struct gpio_desc *gpio_hp; /* ADC_FIFOC register is at different offset on different SoCs */ struct regmap_field *reg_adc_fifoc; @@ -1273,6 +1275,49 @@ static struct snd_soc_dai_driver dummy_cpu_dai = { }, }; +static struct snd_soc_jack sun4i_headphone_jack; + +static struct snd_soc_jack_pin sun4i_headphone_jack_pins[] = { + { .pin = "Headphone", .mask = SND_JACK_HEADPHONE }, +}; + +static struct snd_soc_jack_gpio sun4i_headphone_jack_gpio = { + .name = "hp-det", + .report = SND_JACK_HEADPHONE, + .debounce_time = 150, +}; + +static int sun4i_codec_machine_init(struct snd_soc_pcm_runtime *rtd) +{ + struct snd_soc_card *card = rtd->card; + struct sun4i_codec *scodec = snd_soc_card_get_drvdata(card); + int ret; + + if (scodec->gpio_hp) { + ret = snd_soc_card_jack_new_pins(card, "Headphone Jack", + SND_JACK_HEADPHONE, + &sun4i_headphone_jack, + sun4i_headphone_jack_pins, + ARRAY_SIZE(sun4i_headphone_jack_pins)); + if (ret) { + dev_err(rtd->dev, + "Headphone jack creation failed: %d\n", ret); + return ret; + } + + sun4i_headphone_jack_gpio.desc = scodec->gpio_hp; + ret = snd_soc_jack_add_gpios(&sun4i_headphone_jack, 1, + &sun4i_headphone_jack_gpio); + + if (ret) { + dev_err(rtd->dev, "Headphone GPIO not added: %d\n", ret); + return ret; + } + } + + return 0; +} + static struct snd_soc_dai_link *sun4i_codec_create_link(struct device *dev, int *num_links) { @@ -1298,6 +1343,7 @@ static struct snd_soc_dai_link 
*sun4i_codec_create_link(struct device *dev, link->codecs->name = dev_name(dev); link->platforms->name = dev_name(dev); link->dai_fmt = SND_SOC_DAIFMT_I2S; + link->init = sun4i_codec_machine_init; *num_links = 1; @@ -1738,6 +1784,13 @@ static int sun4i_codec_probe(struct platform_device *pdev) return ret; } + scodec->gpio_hp = devm_gpiod_get_optional(&pdev->dev, "hp-det", GPIOD_IN); + if (IS_ERR(scodec->gpio_hp)) { + ret = PTR_ERR(scodec->gpio_hp); + dev_err_probe(&pdev->dev, ret, "Failed to get hp-det gpio\n"); + return ret; + } + /* reg_field setup */ scodec->reg_adc_fifoc = devm_regmap_field_alloc(&pdev->dev, scodec->regmap, diff --git a/sound/soc/sunxi/sun4i-spdif.c b/sound/soc/sunxi/sun4i-spdif.c index 484b0e7c2def..84e7b363ac3b 100644 --- a/sound/soc/sunxi/sun4i-spdif.c +++ b/sound/soc/sunxi/sun4i-spdif.c @@ -177,6 +177,7 @@ struct sun4i_spdif_quirks { unsigned int reg_dac_txdata; bool has_reset; unsigned int val_fctl_ftx; + unsigned int mclk_multiplier; }; struct sun4i_spdif_dev { @@ -314,6 +315,7 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream, default: return -EINVAL; } + mclk *= host->quirks->mclk_multiplier; ret = clk_set_rate(host->spdif_clk, mclk); if (ret < 0) { @@ -348,6 +350,7 @@ static int sun4i_spdif_hw_params(struct snd_pcm_substream *substream, default: return -EINVAL; } + mclk_div *= host->quirks->mclk_multiplier; reg_val = 0; reg_val |= SUN4I_SPDIF_TXCFG_ASS; @@ -541,24 +544,28 @@ static struct snd_soc_dai_driver sun4i_spdif_dai = { static const struct sun4i_spdif_quirks sun4i_a10_spdif_quirks = { .reg_dac_txdata = SUN4I_SPDIF_TXFIFO, .val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX, + .mclk_multiplier = 1, }; static const struct sun4i_spdif_quirks sun6i_a31_spdif_quirks = { .reg_dac_txdata = SUN4I_SPDIF_TXFIFO, .val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX, .has_reset = true, + .mclk_multiplier = 1, }; static const struct sun4i_spdif_quirks sun8i_h3_spdif_quirks = { .reg_dac_txdata = SUN8I_SPDIF_TXFIFO, .val_fctl_ftx = SUN4I_SPDIF_FCTL_FTX, .has_reset = true, + .mclk_multiplier = 4, }; static const struct sun4i_spdif_quirks sun50i_h6_spdif_quirks = { .reg_dac_txdata = SUN8I_SPDIF_TXFIFO, .val_fctl_ftx = SUN50I_H6_SPDIF_FCTL_FTX, .has_reset = true, + .mclk_multiplier = 1, }; static const struct of_device_id sun4i_spdif_of_match[] = { diff --git a/sound/soc/ti/j721e-evm.c b/sound/soc/ti/j721e-evm.c index 6a969874c927..5e0bdbd34a83 100644 --- a/sound/soc/ti/j721e-evm.c +++ b/sound/soc/ti/j721e-evm.c @@ -182,6 +182,8 @@ static int j721e_configure_refclk(struct j721e_priv *priv, clk_id = J721E_CLK_PARENT_48000; else if (!(rate % 11025) && priv->pll_rates[J721E_CLK_PARENT_44100]) clk_id = J721E_CLK_PARENT_44100; + else if (!(rate % 11025) && priv->pll_rates[J721E_CLK_PARENT_48000]) + clk_id = J721E_CLK_PARENT_48000; else return ret; diff --git a/sound/usb/6fire/chip.c b/sound/usb/6fire/chip.c index 33e962178c93..d562a30b087f 100644 --- a/sound/usb/6fire/chip.c +++ b/sound/usb/6fire/chip.c @@ -61,8 +61,10 @@ static void usb6fire_chip_abort(struct sfire_chip *chip) } } -static void usb6fire_chip_destroy(struct sfire_chip *chip) +static void usb6fire_card_free(struct snd_card *card) { + struct sfire_chip *chip = card->private_data; + if (chip) { if (chip->pcm) usb6fire_pcm_destroy(chip); @@ -72,8 +74,6 @@ static void usb6fire_chip_destroy(struct sfire_chip *chip) usb6fire_comm_destroy(chip); if (chip->control) usb6fire_control_destroy(chip); - if (chip->card) - snd_card_free(chip->card); } } @@ -136,6 +136,7 @@ static int usb6fire_chip_probe(struct usb_interface *intf, 
chip->regidx = regidx; chip->intf_count = 1; chip->card = card; + card->private_free = usb6fire_card_free; ret = usb6fire_comm_init(chip); if (ret < 0) @@ -162,7 +163,7 @@ static int usb6fire_chip_probe(struct usb_interface *intf, return 0; destroy_chip: - usb6fire_chip_destroy(chip); + snd_card_free(card); return ret; } @@ -181,7 +182,6 @@ static void usb6fire_chip_disconnect(struct usb_interface *intf) chip->shutdown = true; usb6fire_chip_abort(chip); - usb6fire_chip_destroy(chip); } } } diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index 4981753652a7..7a89872aa0cb 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c @@ -869,14 +869,20 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev) return 0; } -void snd_usb_caiaq_audio_free(struct snd_usb_caiaqdev *cdev) +void snd_usb_caiaq_audio_disconnect(struct snd_usb_caiaqdev *cdev) { struct device *dev = caiaqdev_to_dev(cdev); dev_dbg(dev, "%s(%p)\n", __func__, cdev); stream_stop(cdev); +} + +void snd_usb_caiaq_audio_free(struct snd_usb_caiaqdev *cdev) +{ + struct device *dev = caiaqdev_to_dev(cdev); + + dev_dbg(dev, "%s(%p)\n", __func__, cdev); free_urbs(cdev->data_urbs_in); free_urbs(cdev->data_urbs_out); kfree(cdev->data_cb_info); } - diff --git a/sound/usb/caiaq/audio.h b/sound/usb/caiaq/audio.h index 869bf6264d6a..07f5d064456c 100644 --- a/sound/usb/caiaq/audio.h +++ b/sound/usb/caiaq/audio.h @@ -3,6 +3,7 @@ #define CAIAQ_AUDIO_H int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev); +void snd_usb_caiaq_audio_disconnect(struct snd_usb_caiaqdev *cdev); void snd_usb_caiaq_audio_free(struct snd_usb_caiaqdev *cdev); #endif /* CAIAQ_AUDIO_H */ diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c index 49f63f878e6f..d5c01d3f126e 100644 --- a/sound/usb/caiaq/device.c +++ b/sound/usb/caiaq/device.c @@ -376,6 +376,17 @@ static void setup_card(struct snd_usb_caiaqdev *cdev) dev_err(dev, "Unable to set up control system (ret=%d)\n", ret); } +static void card_free(struct snd_card *card) +{ + struct snd_usb_caiaqdev *cdev = caiaqdev(card); + +#ifdef CONFIG_SND_USB_CAIAQ_INPUT + snd_usb_caiaq_input_free(cdev); +#endif + snd_usb_caiaq_audio_free(cdev); + usb_reset_device(cdev->chip.dev); +} + static int create_card(struct usb_device *usb_dev, struct usb_interface *intf, struct snd_card **cardp) @@ -489,6 +500,7 @@ static int init_card(struct snd_usb_caiaqdev *cdev) cdev->vendor_name, cdev->product_name, usbpath); setup_card(cdev); + card->private_free = card_free; return 0; err_kill_urb: @@ -534,15 +546,14 @@ static void snd_disconnect(struct usb_interface *intf) snd_card_disconnect(card); #ifdef CONFIG_SND_USB_CAIAQ_INPUT - snd_usb_caiaq_input_free(cdev); + snd_usb_caiaq_input_disconnect(cdev); #endif - snd_usb_caiaq_audio_free(cdev); + snd_usb_caiaq_audio_disconnect(cdev); usb_kill_urb(&cdev->ep1_in_urb); usb_kill_urb(&cdev->midi_out_urb); - snd_card_free(card); - usb_reset_device(interface_to_usbdev(intf)); + snd_card_free_when_closed(card); } diff --git a/sound/usb/caiaq/input.c b/sound/usb/caiaq/input.c index 84f26dce7f5d..a9130891bb69 100644 --- a/sound/usb/caiaq/input.c +++ b/sound/usb/caiaq/input.c @@ -829,15 +829,21 @@ exit_free_idev: return ret; } -void snd_usb_caiaq_input_free(struct snd_usb_caiaqdev *cdev) +void snd_usb_caiaq_input_disconnect(struct snd_usb_caiaqdev *cdev) { if (!cdev || !cdev->input_dev) return; usb_kill_urb(cdev->ep4_in_urb); + input_unregister_device(cdev->input_dev); +} + +void snd_usb_caiaq_input_free(struct snd_usb_caiaqdev *cdev) +{ + if (!cdev || 
!cdev->input_dev) + return; + usb_free_urb(cdev->ep4_in_urb); cdev->ep4_in_urb = NULL; - - input_unregister_device(cdev->input_dev); cdev->input_dev = NULL; } diff --git a/sound/usb/caiaq/input.h b/sound/usb/caiaq/input.h index c42891e7be88..fbe267f85d02 100644 --- a/sound/usb/caiaq/input.h +++ b/sound/usb/caiaq/input.h @@ -4,6 +4,7 @@ void snd_usb_caiaq_input_dispatch(struct snd_usb_caiaqdev *cdev, char *buf, unsigned int len); int snd_usb_caiaq_input_init(struct snd_usb_caiaqdev *cdev); +void snd_usb_caiaq_input_disconnect(struct snd_usb_caiaqdev *cdev); void snd_usb_caiaq_input_free(struct snd_usb_caiaqdev *cdev); #endif diff --git a/sound/usb/clock.c b/sound/usb/clock.c index a676ad093d18..f0f1e445cc56 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -36,6 +36,12 @@ union uac23_clock_multiplier_desc { struct uac_clock_multiplier_descriptor v3; }; +/* check whether the descriptor bLength has the minimal length */ +#define DESC_LENGTH_CHECK(p, proto) \ + ((proto) == UAC_VERSION_3 ? \ + ((p)->v3.bLength >= sizeof((p)->v3)) : \ + ((p)->v2.bLength >= sizeof((p)->v2))) + #define GET_VAL(p, proto, field) \ ((proto) == UAC_VERSION_3 ? (p)->v3.field : (p)->v2.field) @@ -58,6 +64,8 @@ static bool validate_clock_source(void *p, int id, int proto) { union uac23_clock_source_desc *cs = p; + if (!DESC_LENGTH_CHECK(cs, proto)) + return false; return GET_VAL(cs, proto, bClockID) == id; } @@ -65,13 +73,27 @@ static bool validate_clock_selector(void *p, int id, int proto) { union uac23_clock_selector_desc *cs = p; - return GET_VAL(cs, proto, bClockID) == id; + if (!DESC_LENGTH_CHECK(cs, proto)) + return false; + if (GET_VAL(cs, proto, bClockID) != id) + return false; + /* additional length check for baCSourceID array (in bNrInPins size) + * and two more fields (which sizes depend on the protocol) + */ + if (proto == UAC_VERSION_3) + return cs->v3.bLength >= sizeof(cs->v3) + cs->v3.bNrInPins + + 4 /* bmControls */ + 2 /* wCSelectorDescrStr */; + else + return cs->v2.bLength >= sizeof(cs->v2) + cs->v2.bNrInPins + + 1 /* bmControls */ + 1 /* iClockSelector */; } static bool validate_clock_multiplier(void *p, int id, int proto) { union uac23_clock_multiplier_desc *cs = p; + if (!DESC_LENGTH_CHECK(cs, proto)) + return false; return GET_VAL(cs, proto, bClockID) == id; } diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 647fa054d8b1..e3684dfa5f1f 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c @@ -403,10 +403,15 @@ static int prepare_inbound_urb(struct snd_usb_endpoint *ep, static void notify_xrun(struct snd_usb_endpoint *ep) { struct snd_usb_substream *data_subs; + struct snd_pcm_substream *psubs; data_subs = READ_ONCE(ep->data_subs); - if (data_subs && data_subs->pcm_substream) - snd_pcm_stop_xrun(data_subs->pcm_substream); + if (!data_subs) + return; + psubs = data_subs->pcm_substream; + if (psubs && psubs->runtime && + psubs->runtime->state == SNDRV_PCM_STATE_RUNNING) + snd_pcm_stop_xrun(psubs); } static struct snd_usb_packet_info * @@ -557,7 +562,10 @@ static void snd_complete_urb(struct urb *urb) push_back_to_ready_list(ep, ctx); clear_bit(ctx->index, &ep->active_mask); snd_usb_queue_pending_output_urbs(ep, false); - atomic_dec(&ep->submitted_urbs); /* decrement at last */ + /* decrement at last, and check xrun */ + if (atomic_dec_and_test(&ep->submitted_urbs) && + !snd_usb_endpoint_implicit_feedback_sink(ep)) + notify_xrun(ep); return; } diff --git a/sound/usb/format.c b/sound/usb/format.c index 3b45d0ee7693..f33d25a4e4cc 100644 --- a/sound/usb/format.c +++ 
b/sound/usb/format.c @@ -60,6 +60,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL; /* flag potentially raw DSD capable altsettings */ fp->dsd_raw = true; + /* clear special format bit to avoid "unsupported format" msg below */ + format &= ~UAC2_FORMAT_TYPE_I_RAW_DATA; } format <<= 1; @@ -71,8 +73,11 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip, sample_width = as->bBitResolution; sample_bytes = as->bSubslotSize; - if (format & UAC3_FORMAT_TYPE_I_RAW_DATA) + if (format & UAC3_FORMAT_TYPE_I_RAW_DATA) { pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL; + /* clear special format bit to avoid "unsupported format" msg below */ + format &= ~UAC3_FORMAT_TYPE_I_RAW_DATA; + } format <<= 1; break; @@ -258,7 +263,8 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof } /* Jabra Evolve 65 headset */ - if (chip->usb_id == USB_ID(0x0b0e, 0x030b)) { + if (chip->usb_id == USB_ID(0x0b0e, 0x030b) || + chip->usb_id == USB_ID(0x0b0e, 0x030c)) { /* only 48kHz for playback while keeping 16kHz for capture */ if (fp->nr_rates != 1) return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000); diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 2839f6b6f09b..d300cd1f922b 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -489,16 +489,84 @@ static void ch345_broken_sysex_input(struct snd_usb_midi_in_endpoint *ep, /* * CME protocol: like the standard protocol, but SysEx commands are sent as a - * single USB packet preceded by a 0x0F byte. + * single USB packet preceded by a 0x0F byte, as are system realtime + * messages and MIDI Active Sensing. + * Also, multiple messages can be sent in the same packet. */ static void snd_usbmidi_cme_input(struct snd_usb_midi_in_endpoint *ep, uint8_t *buffer, int buffer_length) { - if (buffer_length < 2 || (buffer[0] & 0x0f) != 0x0f) - snd_usbmidi_standard_input(ep, buffer, buffer_length); - else - snd_usbmidi_input_data(ep, buffer[0] >> 4, - &buffer[1], buffer_length - 1); + int remaining = buffer_length; + + /* + * CME send sysex, song position pointer, system realtime + * and active sensing using CIN 0x0f, which in the standard + * is only intended for single byte unparsed data. + * So we need to interpret these here before sending them on. + * By default, we assume single byte data, which is true + * for system realtime (midi clock, start, stop and continue) + * and active sensing, and handle the other (known) cases + * separately. + * In contrast to the standard, CME does not split sysex + * into multiple 4-byte packets, but lumps everything together + * into one. In addition, CME can string multiple messages + * together in the same packet; pressing the Record button + * on an UF6 sends a sysex message directly followed + * by a song position pointer in the same packet. + * For it to have any reasonable meaning, a sysex message + * needs to be at least 3 bytes in length (0xf0, id, 0xf7), + * corresponding to a packet size of 4 bytes, and the ones sent + * by CME devices are 6 or 7 bytes, making the packet fragments + * 7 or 8 bytes long (six or seven bytes plus preceding CN+CIN byte). + * For the other types, the packet size is always 4 bytes, + * as per the standard, with the data size being 3 for SPP + * and 1 for the others. + * Thus all packet fragments are at least 4 bytes long, so we can + * skip anything that is shorter; this also conveniantly skips + * packets with size 0, which CME devices continuously send when + * they have nothing better to do. 
+ * Another quirk is that sometimes multiple messages are sent + * in the same packet. This has been observed for midi clock + * and active sensing i.e. 0x0f 0xf8 0x00 0x00 0x0f 0xfe 0x00 0x00, + * but also multiple note ons/offs, and control change together + * with MIDI clock. Similarly, some sysex messages are followed by + * the song position pointer in the same packet, and occasionally + * additionally by a midi clock or active sensing. + * We handle this by looping over all data and parsing it along the way. + */ + while (remaining >= 4) { + int source_length = 4; /* default */ + + if ((buffer[0] & 0x0f) == 0x0f) { + int data_length = 1; /* default */ + + if (buffer[1] == 0xf0) { + /* Sysex: Find EOX and send on whole message. */ + /* To kick off the search, skip the first + * two bytes (CN+CIN and SYSEX (0xf0). + */ + uint8_t *tmp_buf = buffer + 2; + int tmp_length = remaining - 2; + + while (tmp_length > 1 && *tmp_buf != 0xf7) { + tmp_buf++; + tmp_length--; + } + data_length = tmp_buf - buffer; + source_length = data_length + 1; + } else if (buffer[1] == 0xf2) { + /* Three byte song position pointer */ + data_length = 3; + } + snd_usbmidi_input_data(ep, buffer[0] >> 4, + &buffer[1], data_length); + } else { + /* normal channel events */ + snd_usbmidi_standard_input(ep, buffer, source_length); + } + buffer += source_length; + remaining -= source_length; + } } /* @@ -1145,7 +1213,7 @@ static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) { struct usbmidi_out_port *port = substream->runtime->private_data; - cancel_work_sync(&port->ep->work); + flush_work(&port->ep->work); return substream_open(substream, 0, 0); } diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 102a9b3ba3be..4cded91d22a8 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1084,6 +1084,21 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, struct snd_kcontrol *kctl) { struct snd_usb_audio *chip = cval->head.mixer->chip; + + if (chip->quirk_flags & QUIRK_FLAG_MIC_RES_384) { + if (!strcmp(kctl->id.name, "Mic Capture Volume")) { + usb_audio_info(chip, + "set resolution quirk: cval->res = 384\n"); + cval->res = 384; + } + } else if (chip->quirk_flags & QUIRK_FLAG_MIC_RES_16) { + if (!strcmp(kctl->id.name, "Mic Capture Volume")) { + usb_audio_info(chip, + "set resolution quirk: cval->res = 16\n"); + cval->res = 16; + } + } + switch (chip->usb_id) { case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */ case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */ @@ -1168,27 +1183,6 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, } break; - case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */ - case USB_ID(0x046d, 0x0808): - case USB_ID(0x046d, 0x0809): - case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */ - case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ - case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ - case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ - case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ - case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */ - case USB_ID(0x046d, 0x0991): - case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */ - /* Most audio usb devices lie about volume resolution. - * Most Logitech webcams have res = 384. 
- * Probably there is some logitech magic behind this number --fishor - */ - if (!strcmp(kctl->id.name, "Mic Capture Volume")) { - usb_audio_info(chip, - "set resolution quirk: cval->res = 384\n"); - cval->res = 384; - } - break; case USB_ID(0x0495, 0x3042): /* ESS Technology Asus USB DAC */ if ((strstr(kctl->id.name, "Playback Volume") != NULL) || strstr(kctl->id.name, "Capture Volume") != NULL) { @@ -1197,28 +1191,6 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, cval->res = 1; } break; - case USB_ID(0x1224, 0x2a25): /* Jieli Technology USB PHY 2.0 */ - if (!strcmp(kctl->id.name, "Mic Capture Volume")) { - usb_audio_info(chip, - "set resolution quirk: cval->res = 16\n"); - cval->res = 16; - } - break; - case USB_ID(0x1bcf, 0x2283): /* NexiGo N930AF FHD Webcam */ - case USB_ID(0x03f0, 0x654a): /* HP 320 FHD Webcam */ - if (!strcmp(kctl->id.name, "Mic Capture Volume")) { - usb_audio_info(chip, - "set resolution quirk: cval->res = 16\n"); - cval->res = 16; - } - break; - case USB_ID(0x1bcf, 0x2281): /* HD Webcam */ - if (!strcmp(kctl->id.name, "Mic Capture Volume")) { - usb_audio_info(chip, - "set resolution quirk: cval->res = 16\n"); - cval->res = 16; - } - break; } } diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c index 23260aa1919d..0e9b5431a47f 100644 --- a/sound/usb/mixer_maps.c +++ b/sound/usb/mixer_maps.c @@ -621,6 +621,16 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = { .id = USB_ID(0x1b1c, 0x0a42), .map = corsair_virtuoso_map, }, + { + /* Corsair HS80 RGB Wireless (wired mode) */ + .id = USB_ID(0x1b1c, 0x0a6a), + .map = corsair_virtuoso_map, + }, + { + /* Corsair HS80 RGB Wireless (wireless mode) */ + .id = USB_ID(0x1b1c, 0x0a6b), + .map = corsair_virtuoso_map, + }, { /* Gigabyte TRX40 Aorus Master (rear panel + front mic) */ .id = USB_ID(0x0414, 0xa001), .map = aorus_master_alc1220vb_map, diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 06965da51dd0..be0b3c8ac705 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -3575,6 +3575,52 @@ static void snd_dragonfly_quirk_db_scale(struct usb_mixer_interface *mixer, } } +/* + * Some Plantronics headsets have control names that don't meet ALSA naming + * standards. This function fixes nonstandard source names. By the time + * this function is called the control name should look like one of these: + * "source names Playback Volume" + * "source names Playback Switch" + * "source names Capture Volume" + * "source names Capture Switch" + * If any of the trigger words are found in the name then the name will + * be changed to: + * "Headset Playback Volume" + * "Headset Playback Switch" + * "Headset Capture Volume" + * "Headset Capture Switch" + * depending on the current suffix. 
+ */ +static void snd_fix_plt_name(struct snd_usb_audio *chip, + struct snd_ctl_elem_id *id) +{ + /* no variant of "Sidetone" should be added to this list */ + static const char * const trigger[] = { + "Earphone", "Microphone", "Receive", "Transmit" + }; + static const char * const suffix[] = { + " Playback Volume", " Playback Switch", + " Capture Volume", " Capture Switch" + }; + int i; + + for (i = 0; i < ARRAY_SIZE(trigger); i++) + if (strstr(id->name, trigger[i])) + goto triggered; + usb_audio_dbg(chip, "no change in %s\n", id->name); + return; + +triggered: + for (i = 0; i < ARRAY_SIZE(suffix); i++) + if (strstr(id->name, suffix[i])) { + usb_audio_dbg(chip, "fixing kctl name %s\n", id->name); + snprintf(id->name, sizeof(id->name), "Headset%s", + suffix[i]); + return; + } + usb_audio_dbg(chip, "something wrong in kctl name %s\n", id->name); +} + void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, struct usb_mixer_elem_info *cval, int unitid, struct snd_kcontrol *kctl) @@ -3592,5 +3638,10 @@ void snd_usb_mixer_fu_apply_quirk(struct usb_mixer_interface *mixer, cval->min_mute = 1; break; } + + /* ALSA-ify some Plantronics headset control names */ + if (USB_ID_VENDOR(mixer->chip->usb_id) == 0x047f && + (cval->control == UAC_FU_MUTE || cval->control == UAC_FU_VOLUME)) + snd_fix_plt_name(mixer->chip, &kctl->id); } diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c index 6eb7d93b358d..20ac32635f1f 100644 --- a/sound/usb/mixer_us16x08.c +++ b/sound/usb/mixer_us16x08.c @@ -687,7 +687,7 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol, struct usb_mixer_elem_info *elem = kcontrol->private_data; struct snd_usb_audio *chip = elem->head.mixer->chip; struct snd_us16x08_meter_store *store = elem->private_data; - u8 meter_urb[64]; + u8 meter_urb[64] = {0}; switch (kcontrol->private_value) { case 0: { diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 75cde5779f38..d1bd8e0d6025 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -324,7 +324,6 @@ YAMAHA_DEVICE(0x105a, NULL), YAMAHA_DEVICE(0x105b, NULL), YAMAHA_DEVICE(0x105c, NULL), YAMAHA_DEVICE(0x105d, NULL), -YAMAHA_DEVICE(0x1718, "P-125"), { USB_DEVICE(0x0499, 0x1503), QUIRK_DRIVER_INFO { @@ -391,6 +390,19 @@ YAMAHA_DEVICE(0x1718, "P-125"), } } }, +{ + USB_DEVICE(0x0499, 0x1718), + QUIRK_DRIVER_INFO { + /* .vendor_name = "Yamaha", */ + /* .product_name = "P-125", */ + QUIRK_DATA_COMPOSITE { + { QUIRK_DATA_STANDARD_AUDIO(1) }, + { QUIRK_DATA_STANDARD_AUDIO(2) }, + { QUIRK_DATA_MIDI_YAMAHA(3) }, + QUIRK_COMPOSITE_END + } + } +}, YAMAHA_DEVICE(0x2000, "DGP-7"), YAMAHA_DEVICE(0x2001, "DGP-5"), YAMAHA_DEVICE(0x2002, NULL), diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index e96f5361e762..b2a612c5b299 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -553,6 +553,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip, static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interface *intf) { struct usb_host_config *config = dev->actconfig; + struct usb_device_descriptor *new_device_descriptor __free(kfree) = NULL; int err; if (le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_OLD || @@ -563,11 +564,19 @@ static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interfac 0x10, 0x43, 0x0001, 0x000a, NULL, 0); if (err < 0) dev_dbg(&dev->dev, "error sending boot message: %d\n", err); + + new_device_descriptor = kmalloc(sizeof(*new_device_descriptor), GFP_KERNEL); + if (!new_device_descriptor) + return 
-ENOMEM; err = usb_get_descriptor(dev, USB_DT_DEVICE, 0, - &dev->descriptor, sizeof(dev->descriptor)); - config = dev->actconfig; + new_device_descriptor, sizeof(*new_device_descriptor)); if (err < 0) dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err); + if (new_device_descriptor->bNumConfigurations > dev->descriptor.bNumConfigurations) + dev_dbg(&dev->dev, "error too large bNumConfigurations: %d\n", + new_device_descriptor->bNumConfigurations); + else + memcpy(&dev->descriptor, new_device_descriptor, sizeof(dev->descriptor)); err = usb_reset_configuration(dev); if (err < 0) dev_dbg(&dev->dev, "error usb_reset_configuration: %d\n", err); @@ -899,6 +908,7 @@ static void mbox2_setup_48_24_magic(struct usb_device *dev) static int snd_usb_mbox2_boot_quirk(struct usb_device *dev) { struct usb_host_config *config = dev->actconfig; + struct usb_device_descriptor *new_device_descriptor __free(kfree) = NULL; int err; u8 bootresponse[0x12]; int fwsize; @@ -933,11 +943,19 @@ static int snd_usb_mbox2_boot_quirk(struct usb_device *dev) dev_dbg(&dev->dev, "device initialised!\n"); + new_device_descriptor = kmalloc(sizeof(*new_device_descriptor), GFP_KERNEL); + if (!new_device_descriptor) + return -ENOMEM; + err = usb_get_descriptor(dev, USB_DT_DEVICE, 0, - &dev->descriptor, sizeof(dev->descriptor)); - config = dev->actconfig; + new_device_descriptor, sizeof(*new_device_descriptor)); if (err < 0) dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err); + if (new_device_descriptor->bNumConfigurations > dev->descriptor.bNumConfigurations) + dev_dbg(&dev->dev, "error too large bNumConfigurations: %d\n", + new_device_descriptor->bNumConfigurations); + else + memcpy(&dev->descriptor, new_device_descriptor, sizeof(dev->descriptor)); err = usb_reset_configuration(dev); if (err < 0) @@ -1251,6 +1269,7 @@ static void mbox3_setup_48_24_magic(struct usb_device *dev) static int snd_usb_mbox3_boot_quirk(struct usb_device *dev) { struct usb_host_config *config = dev->actconfig; + struct usb_device_descriptor *new_device_descriptor __free(kfree) = NULL; int err; int descriptor_size; @@ -1263,11 +1282,19 @@ static int snd_usb_mbox3_boot_quirk(struct usb_device *dev) dev_dbg(&dev->dev, "device initialised!\n"); + new_device_descriptor = kmalloc(sizeof(*new_device_descriptor), GFP_KERNEL); + if (!new_device_descriptor) + return -ENOMEM; + err = usb_get_descriptor(dev, USB_DT_DEVICE, 0, - &dev->descriptor, sizeof(dev->descriptor)); - config = dev->actconfig; + new_device_descriptor, sizeof(*new_device_descriptor)); if (err < 0) dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err); + if (new_device_descriptor->bNumConfigurations > dev->descriptor.bNumConfigurations) + dev_dbg(&dev->dev, "error too large bNumConfigurations: %d\n", + new_device_descriptor->bNumConfigurations); + else + memcpy(&dev->descriptor, new_device_descriptor, sizeof(dev->descriptor)); err = usb_reset_configuration(dev); if (err < 0) @@ -1746,6 +1773,7 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, case USB_ID(0x534d, 0x2109): /* MacroSilicon MS2109 */ subs->stream_offset_adj = 2; break; + case USB_ID(0x2b73, 0x000a): /* Pioneer DJM-900NXS2 */ case USB_ID(0x2b73, 0x0013): /* Pioneer DJM-450 */ pioneer_djm_set_format_quirk(subs, 0x0082); break; @@ -2015,7 +2043,7 @@ struct usb_audio_quirk_flags_table { static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { /* Device matches */ DEVICE_FLG(0x03f0, 0x654a, /* HP 320 FHD Webcam */ - QUIRK_FLAG_GET_SAMPLE_RATE), + QUIRK_FLAG_GET_SAMPLE_RATE | 
QUIRK_FLAG_MIC_RES_16), DEVICE_FLG(0x041e, 0x3000, /* Creative SB Extigy */ QUIRK_FLAG_IGNORE_CTL_ERROR), DEVICE_FLG(0x041e, 0x4080, /* Creative Live Cam VF0610 */ @@ -2023,12 +2051,35 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { DEVICE_FLG(0x045e, 0x083c, /* MS USB Link headset */ QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_CTL_MSG_DELAY | QUIRK_FLAG_DISABLE_AUTOSUSPEND), + DEVICE_FLG(0x046d, 0x0807, /* Logitech Webcam C500 */ + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384), + DEVICE_FLG(0x046d, 0x0808, /* Logitech Webcam C600 */ + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384), + DEVICE_FLG(0x046d, 0x0809, + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384), + DEVICE_FLG(0x046d, 0x0819, /* Logitech Webcam C210 */ + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384), + DEVICE_FLG(0x046d, 0x081b, /* HD Webcam c310 */ + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384), + DEVICE_FLG(0x046d, 0x081d, /* HD Webcam c510 */ + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384), + DEVICE_FLG(0x046d, 0x0825, /* HD Webcam c270 */ + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384), + DEVICE_FLG(0x046d, 0x0826, /* HD Webcam c525 */ + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384), DEVICE_FLG(0x046d, 0x084c, /* Logitech ConferenceCam Connect */ QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_CTL_MSG_DELAY_1M), + DEVICE_FLG(0x046d, 0x08ca, /* Logitech Quickcam Fusion */ + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384), DEVICE_FLG(0x046d, 0x0991, /* Logitech QuickCam Pro */ - QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR), + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR | + QUIRK_FLAG_MIC_RES_384), + DEVICE_FLG(0x046d, 0x09a2, /* QuickCam Communicate Deluxe/S7500 */ + QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_MIC_RES_384), DEVICE_FLG(0x046d, 0x09a4, /* Logitech QuickCam E 3500 */ QUIRK_FLAG_CTL_MSG_DELAY_1M | QUIRK_FLAG_IGNORE_CTL_ERROR), + DEVICE_FLG(0x0499, 0x1506, /* Yamaha THR5 */ + QUIRK_FLAG_GENERIC_IMPLICIT_FB), DEVICE_FLG(0x0499, 0x1509, /* Steinberg UR22 */ QUIRK_FLAG_GENERIC_IMPLICIT_FB), DEVICE_FLG(0x0499, 0x3108, /* Yamaha YIT-W12TX */ @@ -2087,14 +2138,20 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_CTL_MSG_DELAY_1M), DEVICE_FLG(0x0c45, 0x6340, /* Sonix HD USB Camera */ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x0c45, 0x636b, /* Microdia JP001 USB Camera */ + QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x0d8c, 0x0014, /* USB Audio Device */ + QUIRK_FLAG_CTL_MSG_DELAY_1M), DEVICE_FLG(0x0ecb, 0x205c, /* JBL Quantum610 Wireless */ QUIRK_FLAG_FIXED_RATE), DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */ QUIRK_FLAG_FIXED_RATE), DEVICE_FLG(0x0fd9, 0x0008, /* Hauppauge HVR-950Q */ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER), - DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */ + DEVICE_FLG(0x1101, 0x0003, /* Audioengine D1 */ QUIRK_FLAG_GET_SAMPLE_RATE), + DEVICE_FLG(0x1224, 0x2a25, /* Jieli Technology USB PHY 2.0 */ + QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16), DEVICE_FLG(0x1395, 0x740a, /* Sennheiser DECT */ QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x1397, 0x0507, /* Behringer UMC202HD */ @@ -2132,9 +2189,9 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { DEVICE_FLG(0x19f7, 0x0035, /* RODE NT-USB+ */ QUIRK_FLAG_GET_SAMPLE_RATE), DEVICE_FLG(0x1bcf, 0x2281, /* HD Webcam */ - QUIRK_FLAG_GET_SAMPLE_RATE), + QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16), DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD 
Webcam */ - QUIRK_FLAG_GET_SAMPLE_RATE), + QUIRK_FLAG_GET_SAMPLE_RATE | QUIRK_FLAG_MIC_RES_16), DEVICE_FLG(0x2040, 0x7200, /* Hauppauge HVR-950Q */ QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER), DEVICE_FLG(0x2040, 0x7201, /* Hauppauge HVR-950Q-MXL */ @@ -2173,6 +2230,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_DSD_RAW), DEVICE_FLG(0x2522, 0x0007, /* LH Labs Geek Out HD Audio 1V5 */ QUIRK_FLAG_SET_IFACE_FIRST), + DEVICE_FLG(0x262a, 0x9302, /* ddHiFi TC44C */ + QUIRK_FLAG_DSD_RAW), DEVICE_FLG(0x2708, 0x0002, /* Audient iD14 */ QUIRK_FLAG_IGNORE_CTL_ERROR), DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */ @@ -2187,6 +2246,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = { QUIRK_FLAG_CTL_MSG_DELAY_1M), DEVICE_FLG(0x2d95, 0x8021, /* VIVO USB-C-XE710 HEADSET */ QUIRK_FLAG_CTL_MSG_DELAY_1M), + DEVICE_FLG(0x2fc6, 0xf0b7, /* iBasso DC07 Pro */ + QUIRK_FLAG_CTL_MSG_DELAY_1M), DEVICE_FLG(0x30be, 0x0101, /* Schiit Hel */ QUIRK_FLAG_IGNORE_CTL_ERROR), DEVICE_FLG(0x413c, 0xa506, /* Dell AE515 sound bar */ diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index f5a8dca66457..65dcb1a02e97 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -178,6 +178,8 @@ extern bool snd_usb_skip_validation; * QUIRK_FLAG_FIXED_RATE * Do not set PCM rate (frequency) when only one rate is available * for the given endpoint. + * QUIRK_FLAG_MIC_RES_16 and QUIRK_FLAG_MIC_RES_384 + * Set the fixed resolution for Mic Capture Volume (mostly for webcams) */ #define QUIRK_FLAG_GET_SAMPLE_RATE (1U << 0) @@ -202,5 +204,7 @@ extern bool snd_usb_skip_validation; #define QUIRK_FLAG_IFACE_SKIP_CLOSE (1U << 19) #define QUIRK_FLAG_FORCE_IFACE_RESET (1U << 20) #define QUIRK_FLAG_FIXED_RATE (1U << 21) +#define QUIRK_FLAG_MIC_RES_16 (1U << 22) +#define QUIRK_FLAG_MIC_RES_384 (1U << 23) #endif /* __USBAUDIO_H */ diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c index e558931cce16..23d7d542a3de 100644 --- a/sound/usb/usx2y/us122l.c +++ b/sound/usb/usx2y/us122l.c @@ -617,10 +617,7 @@ static void snd_us122l_disconnect(struct usb_interface *intf) usb_put_intf(usb_ifnum_to_if(us122l->dev, 1)); usb_put_dev(us122l->dev); - while (atomic_read(&us122l->mmap_count)) - msleep(500); - - snd_card_free(card); + snd_card_free_when_closed(card); } static int snd_us122l_suspend(struct usb_interface *intf, pm_message_t message) diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c index 52f4e6652407..0fe989a63376 100644 --- a/sound/usb/usx2y/usbusx2y.c +++ b/sound/usb/usx2y/usbusx2y.c @@ -151,6 +151,12 @@ static int snd_usx2y_card_used[SNDRV_CARDS]; static void snd_usx2y_card_private_free(struct snd_card *card); static void usx2y_unlinkseq(struct snd_usx2y_async_seq *s); +#ifdef USX2Y_NRPACKS_VARIABLE +int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */ +module_param(nrpacks, int, 0444); +MODULE_PARM_DESC(nrpacks, "Number of packets per URB."); +#endif + /* * pipe 4 is used for switching the lamps, setting samplerate, volumes .... 
*/ @@ -423,7 +429,7 @@ static void snd_usx2y_disconnect(struct usb_interface *intf) } if (usx2y->us428ctls_sharedmem) wake_up(&usx2y->us428ctls_wait_queue_head); - snd_card_free(card); + snd_card_free_when_closed(card); } static int snd_usx2y_probe(struct usb_interface *intf, @@ -433,6 +439,11 @@ static int snd_usx2y_probe(struct usb_interface *intf, struct snd_card *card; int err; +#ifdef USX2Y_NRPACKS_VARIABLE + if (nrpacks < 0 || nrpacks > USX2Y_NRPACKS_MAX) + return -EINVAL; +#endif + if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 || (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 && le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 && diff --git a/sound/usb/usx2y/usbusx2y.h b/sound/usb/usx2y/usbusx2y.h index 8d82f5cc2fe1..0538c457921e 100644 --- a/sound/usb/usx2y/usbusx2y.h +++ b/sound/usb/usx2y/usbusx2y.h @@ -7,6 +7,32 @@ #define NRURBS 2 +/* Default value used for nr of packs per urb. + * 1 to 4 have been tested ok on uhci. + * To use 3 on ohci, you'd need a patch: + * look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on + * "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425" + * + * 1, 2 and 4 work out of the box on ohci, if I recall correctly. + * Bigger is safer operation, smaller gives lower latencies. + */ +#define USX2Y_NRPACKS 4 + +#define USX2Y_NRPACKS_MAX 1024 + +/* If your system works ok with this module's parameter + * nrpacks set to 1, you might as well comment + * this define out, and thereby produce smaller, faster code. + * You'd also set USX2Y_NRPACKS to 1 then. + */ +#define USX2Y_NRPACKS_VARIABLE 1 + +#ifdef USX2Y_NRPACKS_VARIABLE +extern int nrpacks; +#define nr_of_packs() nrpacks +#else +#define nr_of_packs() USX2Y_NRPACKS +#endif #define URBS_ASYNC_SEQ 10 #define URB_DATA_LEN_ASYNC_SEQ 32 diff --git a/sound/usb/usx2y/usbusx2yaudio.c b/sound/usb/usx2y/usbusx2yaudio.c index 5197599e7aa6..98d0e8edc983 100644 --- a/sound/usb/usx2y/usbusx2yaudio.c +++ b/sound/usb/usx2y/usbusx2yaudio.c @@ -28,33 +28,6 @@ #include "usx2y.h" #include "usbusx2y.h" -/* Default value used for nr of packs per urb. - * 1 to 4 have been tested ok on uhci. - * To use 3 on ohci, you'd need a patch: - * look for "0000425-linux-2.6.9-rc4-mm1_ohci-hcd.patch.gz" on - * "https://bugtrack.alsa-project.org/alsa-bug/bug_view_page.php?bug_id=0000425" - * - * 1, 2 and 4 work out of the box on ohci, if I recall correctly. - * Bigger is safer operation, smaller gives lower latencies. - */ -#define USX2Y_NRPACKS 4 - -/* If your system works ok with this module's parameter - * nrpacks set to 1, you might as well comment - * this define out, and thereby produce smaller, faster code. - * You'd also set USX2Y_NRPACKS to 1 then. 
- */ -#define USX2Y_NRPACKS_VARIABLE 1 - -#ifdef USX2Y_NRPACKS_VARIABLE -static int nrpacks = USX2Y_NRPACKS; /* number of packets per urb */ -#define nr_of_packs() nrpacks -module_param(nrpacks, int, 0444); -MODULE_PARM_DESC(nrpacks, "Number of packets per URB."); -#else -#define nr_of_packs() USX2Y_NRPACKS -#endif - static int usx2y_urb_capt_retire(struct snd_usx2y_substream *subs) { struct urb *urb = subs->completed_urb; diff --git a/sound/virtio/virtio_pcm.c b/sound/virtio/virtio_pcm.c index c10d91fff2fb..1ddec1f4f05d 100644 --- a/sound/virtio/virtio_pcm.c +++ b/sound/virtio/virtio_pcm.c @@ -337,6 +337,21 @@ int virtsnd_pcm_parse_cfg(struct virtio_snd *snd) if (!snd->substreams) return -ENOMEM; + /* + * Initialize critical substream fields early in case we hit an + * error path and end up trying to clean up uninitialized structures + * elsewhere. + */ + for (i = 0; i < snd->nsubstreams; ++i) { + struct virtio_pcm_substream *vss = &snd->substreams[i]; + + vss->snd = snd; + vss->sid = i; + INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed); + init_waitqueue_head(&vss->msg_empty); + spin_lock_init(&vss->lock); + } + info = kcalloc(snd->nsubstreams, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; @@ -350,12 +365,6 @@ int virtsnd_pcm_parse_cfg(struct virtio_snd *snd) struct virtio_pcm_substream *vss = &snd->substreams[i]; struct virtio_pcm *vpcm; - vss->snd = snd; - vss->sid = i; - INIT_WORK(&vss->elapsed_period, virtsnd_pcm_period_elapsed); - init_waitqueue_head(&vss->msg_empty); - spin_lock_init(&vss->lock); - rc = virtsnd_pcm_build_hw(vss, &info[i]); if (rc) goto on_exit; diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c index 156b62a163c5..8a48cc2536f5 100644 --- a/tools/bootconfig/main.c +++ b/tools/bootconfig/main.c @@ -226,7 +226,7 @@ static int load_xbc_from_initrd(int fd, char **buf) /* Wrong Checksum */ rcsum = xbc_calc_checksum(*buf, size); if (csum != rcsum) { - pr_err("checksum error: %d != %d\n", csum, rcsum); + pr_err("checksum error: %u != %u\n", csum, rcsum); return -EINVAL; } @@ -395,7 +395,7 @@ static int apply_xbc(const char *path, const char *xbc_path) xbc_get_info(&ret, NULL); printf("\tNumber of nodes: %d\n", ret); printf("\tSize: %u bytes\n", (unsigned int)size); - printf("\tChecksum: %d\n", (unsigned int)csum); + printf("\tChecksum: %u\n", (unsigned int)csum); /* TODO: Check the options by schema */ xbc_exit(); diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index db02b000fbeb..eea00bc15b5c 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -384,10 +384,11 @@ int get_fd_type(int fd) p_err("can't read link type: %s", strerror(errno)); return -1; } - if (n == sizeof(path)) { + if (n == sizeof(buf)) { p_err("can't read link type: path too long!"); return -1; } + buf[n] = '\0'; if (strstr(buf, "bpf-map")) return BPF_OBJ_MAP; diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c index aaf99a0168c9..fe23c9669a87 100644 --- a/tools/bpf/bpftool/jit_disasm.c +++ b/tools/bpf/bpftool/jit_disasm.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include @@ -29,14 +28,18 @@ #include "json_writer.h" #include "main.h" -static void get_exec_path(char *tpath, size_t size) +static int get_exec_path(char *tpath, size_t size) { const char *path = "/proc/self/exe"; ssize_t len; len = readlink(path, tpath, size - 1); - assert(len > 0); + if (len <= 0) + return -1; + tpath[len] = 0; + + return 0; } static int oper_count; @@ -97,30 +100,39 @@ static int 
fprintf_json_styled(void *out, return r; } -void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, - const char *arch, const char *disassembler_options, - const struct btf *btf, - const struct bpf_prog_linfo *prog_linfo, - __u64 func_ksym, unsigned int func_idx, - bool linum) +int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, + const char *arch, const char *disassembler_options, + const struct btf *btf, + const struct bpf_prog_linfo *prog_linfo, + __u64 func_ksym, unsigned int func_idx, + bool linum) { const struct bpf_line_info *linfo = NULL; disassembler_ftype disassemble; + int count, i, pc = 0, err = -1; struct disassemble_info info; unsigned int nr_skip = 0; - int count, i, pc = 0; char tpath[PATH_MAX]; bfd *bfdf; if (!len) - return; + return -1; memset(tpath, 0, sizeof(tpath)); - get_exec_path(tpath, sizeof(tpath)); + if (get_exec_path(tpath, sizeof(tpath))) { + p_err("failed to create disasembler (get_exec_path)"); + return -1; + } bfdf = bfd_openr(tpath, NULL); - assert(bfdf); - assert(bfd_check_format(bfdf, bfd_object)); + if (!bfdf) { + p_err("failed to create disassembler (bfd_openr)"); + return -1; + } + if (!bfd_check_format(bfdf, bfd_object)) { + p_err("failed to create disassembler (bfd_check_format)"); + goto exit_close; + } if (json_output) init_disassemble_info_compat(&info, stdout, @@ -139,7 +151,7 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, bfdf->arch_info = inf; } else { p_err("No libbfd support for %s", arch); - return; + goto exit_close; } } @@ -160,7 +172,10 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, #else disassemble = disassembler(bfdf); #endif - assert(disassemble); + if (!disassemble) { + p_err("failed to create disassembler"); + goto exit_close; + } if (json_output) jsonw_start_array(json_wtr); @@ -224,7 +239,11 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, if (json_output) jsonw_end_array(json_wtr); + err = 0; + +exit_close: bfd_close(bfdf); + return err; } int disasm_init(void) diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h index 5e5060c2ac04..c9e171082cf6 100644 --- a/tools/bpf/bpftool/main.h +++ b/tools/bpf/bpftool/main.h @@ -173,22 +173,23 @@ int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len); struct bpf_prog_linfo; #ifdef HAVE_LIBBFD_SUPPORT -void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, - const char *arch, const char *disassembler_options, - const struct btf *btf, - const struct bpf_prog_linfo *prog_linfo, - __u64 func_ksym, unsigned int func_idx, - bool linum); +int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, + const char *arch, const char *disassembler_options, + const struct btf *btf, + const struct bpf_prog_linfo *prog_linfo, + __u64 func_ksym, unsigned int func_idx, + bool linum); int disasm_init(void); #else static inline -void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, - const char *arch, const char *disassembler_options, - const struct btf *btf, - const struct bpf_prog_linfo *prog_linfo, - __u64 func_ksym, unsigned int func_idx, - bool linum) +int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes, + const char *arch, const char *disassembler_options, + const struct btf *btf, + const struct bpf_prog_linfo *prog_linfo, + __u64 func_ksym, unsigned int func_idx, + bool linum) { + return 0; } static inline int disasm_init(void) { diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 
9a6ca9f31133..3087ced658ad 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c @@ -1,7 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2017-2018 Netronome Systems, Inc. */ -#include #include #include #include diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 7e0b846e17ee..1c3dc1dae23f 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -820,10 +820,18 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, printf("%s:\n", sym_name); } - disasm_print_insn(img, lens[i], opcodes, - name, disasm_opt, btf, - prog_linfo, ksyms[i], i, - linum); + if (ksyms) { + if (disasm_print_insn(img, lens[i], opcodes, + name, disasm_opt, btf, + prog_linfo, ksyms[i], i, + linum)) + goto exit_free; + } else { + if (disasm_print_insn(img, lens[i], opcodes, + name, disasm_opt, btf, + NULL, 0, 0, false)) + goto exit_free; + } img += lens[i]; @@ -836,8 +844,10 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode, if (json_output) jsonw_end_array(json_wtr); } else { - disasm_print_insn(buf, member_len, opcodes, name, - disasm_opt, btf, NULL, 0, 0, false); + if (disasm_print_insn(buf, member_len, opcodes, name, + disasm_opt, btf, NULL, 0, 0, + false)) + goto exit_free; } } else if (visual) { if (json_output) diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build index 715092fc6a23..6a043b729b36 100644 --- a/tools/build/Makefile.build +++ b/tools/build/Makefile.build @@ -130,6 +130,10 @@ objprefix := $(subst ./,,$(OUTPUT)$(dir)/) obj-y := $(addprefix $(objprefix),$(obj-y)) subdir-obj-y := $(addprefix $(objprefix),$(subdir-obj-y)) +# Separate out test log files from real build objects. +test-y := $(filter %_log, $(obj-y)) +obj-y := $(filter-out %_log, $(obj-y)) + # Final '$(obj)-in.o' object in-target := $(objprefix)$(obj)-in.o @@ -140,7 +144,7 @@ $(subdir-y): $(sort $(subdir-obj-y)): $(subdir-y) ; -$(in-target): $(obj-y) FORCE +$(in-target): $(obj-y) $(test-y) FORCE $(call rule_mkdir) $(call if_changed,$(host)ld_multi) diff --git a/tools/include/uapi/linux/stddef.h b/tools/include/uapi/linux/stddef.h index bb6ea517efb5..c53cde425406 100644 --- a/tools/include/uapi/linux/stddef.h +++ b/tools/include/uapi/linux/stddef.h @@ -8,6 +8,13 @@ #define __always_inline __inline__ #endif +/* Not all C++ standards support type declarations inside an anonymous union */ +#ifndef __cplusplus +#define __struct_group_tag(TAG) TAG +#else +#define __struct_group_tag(TAG) +#endif + /** * __struct_group() - Create a mirrored named and anonyomous struct * @@ -20,14 +27,14 @@ * and size: one anonymous and one named. The former's members can be used * normally without sub-struct naming, and the latter can be used to * reason about the start, end, and size of the group of struct members. - * The named struct can also be explicitly tagged for layer reuse, as well - * as both having struct attributes appended. + * The named struct can also be explicitly tagged for layer reuse (C only), + * as well as both having struct attributes appended. */ #define __struct_group(TAG, NAME, ATTRS, MEMBERS...) 
\ union { \ struct { MEMBERS } ATTRS; \ - struct TAG { MEMBERS } ATTRS NAME; \ - } + struct __struct_group_tag(TAG) { MEMBERS } ATTRS NAME; \ + } ATTRS /** * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 878f05a42421..98d5e566e058 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1751,7 +1751,7 @@ static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, } len = strlen(value); - if (value[len - 1] != '"') { + if (len < 2 || value[len - 1] != '"') { pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", ext->name, value); return -EINVAL; @@ -3501,7 +3501,7 @@ static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx) return true; /* global function */ - return bind == STB_GLOBAL && type == STT_FUNC; + return (bind == STB_GLOBAL || bind == STB_WEAK) && type == STT_FUNC; } static int find_extern_btf_id(const struct btf *btf, const char *ext_name) @@ -3896,7 +3896,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj) static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog) { - return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; + return prog->sec_idx == obj->efile.text_shndx; } struct bpf_program * diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 8a7cb830bff1..752ef88c9fd9 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -396,6 +396,8 @@ static int init_output_elf(struct bpf_linker *linker, const char *file) pr_warn_elf("failed to create SYMTAB data"); return -EINVAL; } + /* Ensure libelf translates byte-order of symbol records */ + sec->data->d_type = ELF_T_SYM; str_off = strset__add_str(linker->strtab_strs, sec->sec_name); if (str_off < 0) @@ -565,17 +567,15 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, } obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL); if (!obj->elf) { - err = -errno; pr_warn_elf("failed to parse ELF file '%s'", filename); - return err; + return -EINVAL; } /* Sanity check ELF file high-level properties */ ehdr = elf64_getehdr(obj->elf); if (!ehdr) { - err = -errno; pr_warn_elf("failed to get ELF header for %s", filename); - return err; + return -EINVAL; } if (ehdr->e_ident[EI_DATA] != host_endianness) { err = -EOPNOTSUPP; @@ -591,9 +591,8 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, } if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) { - err = -errno; pr_warn_elf("failed to get SHSTRTAB section index for %s", filename); - return err; + return -EINVAL; } scn = NULL; @@ -603,26 +602,23 @@ static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, shdr = elf64_getshdr(scn); if (!shdr) { - err = -errno; pr_warn_elf("failed to get section #%zu header for %s", sec_idx, filename); - return err; + return -EINVAL; } sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name); if (!sec_name) { - err = -errno; pr_warn_elf("failed to get section #%zu name for %s", sec_idx, filename); - return err; + return -EINVAL; } data = elf_getdata(scn, 0); if (!data) { - err = -errno; pr_warn_elf("failed to get section #%zu (%s) data from %s", sec_idx, sec_name, filename); - return err; + return -EINVAL; } sec = add_src_sec(obj, sec_name); @@ -1966,7 +1962,7 @@ add_sym: obj->sym_map[src_sym_idx] = dst_sym_idx; - if (sym_type == STT_SECTION && dst_sym) { + if (sym_type == STT_SECTION && dst_sec) { dst_sec->sec_sym_idx = dst_sym_idx; dst_sym->st_value = 
0; } @@ -2595,14 +2591,14 @@ int bpf_linker__finalize(struct bpf_linker *linker) /* Finalize ELF layout */ if (elf_update(linker->elf, ELF_C_NULL) < 0) { - err = -errno; + err = -EINVAL; pr_warn_elf("failed to finalize ELF layout"); return libbpf_err(err); } /* Write out final ELF contents */ if (elf_update(linker->elf, ELF_C_WRITE) < 0) { - err = -errno; + err = -EINVAL; pr_warn_elf("failed to write ELF contents"); return libbpf_err(err); } diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c index af1cb30556b4..b8e83712a5d7 100644 --- a/tools/lib/bpf/usdt.c +++ b/tools/lib/bpf/usdt.c @@ -653,7 +653,7 @@ static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char * * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation */ usdt_abs_ip = note.loc_addr; - if (base_addr) + if (base_addr && note.base_addr) usdt_abs_ip += base_addr - note.base_addr; /* When attaching uprobes (which is what USDTs basically are) diff --git a/tools/lib/thermal/Makefile b/tools/lib/thermal/Makefile index 2d0d255fd0e1..8890fd57b110 100644 --- a/tools/lib/thermal/Makefile +++ b/tools/lib/thermal/Makefile @@ -121,7 +121,9 @@ all: fixdep clean: $(call QUIET_CLEAN, libthermal) $(RM) $(LIBTHERMAL_A) \ - *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBTHERMAL_VERSION) .*.d .*.cmd LIBTHERMAL-CFLAGS $(LIBTHERMAL_PC) + *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBTHERMAL_VERSION) \ + .*.d .*.cmd LIBTHERMAL-CFLAGS $(LIBTHERMAL_PC) \ + $(srctree)/tools/$(THERMAL_UAPI) $(LIBTHERMAL_PC): $(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \ diff --git a/tools/lib/thermal/commands.c b/tools/lib/thermal/commands.c index 73d4d4e8d6ec..27b4442f0e34 100644 --- a/tools/lib/thermal/commands.c +++ b/tools/lib/thermal/commands.c @@ -261,9 +261,25 @@ static struct genl_ops thermal_cmd_ops = { .o_ncmds = ARRAY_SIZE(thermal_cmds), }; -static thermal_error_t thermal_genl_auto(struct thermal_handler *th, int id, int cmd, - int flags, void *arg) +struct cmd_param { + int tz_id; +}; + +typedef int (*cmd_cb_t)(struct nl_msg *, struct cmd_param *); + +static int thermal_genl_tz_id_encode(struct nl_msg *msg, struct cmd_param *p) { + if (p->tz_id >= 0 && nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id)) + return -1; + + return 0; +} + +static thermal_error_t thermal_genl_auto(struct thermal_handler *th, cmd_cb_t cmd_cb, + struct cmd_param *param, + int cmd, int flags, void *arg) +{ + thermal_error_t ret = THERMAL_ERROR; struct nl_msg *msg; void *hdr; @@ -274,45 +290,55 @@ static thermal_error_t thermal_genl_auto(struct thermal_handler *th, int id, int hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, thermal_cmd_ops.o_id, 0, flags, cmd, THERMAL_GENL_VERSION); if (!hdr) - return THERMAL_ERROR; + goto out; - if (id >= 0 && nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, id)) - return THERMAL_ERROR; + if (cmd_cb && cmd_cb(msg, param)) + goto out; if (nl_send_msg(th->sk_cmd, th->cb_cmd, msg, genl_handle_msg, arg)) - return THERMAL_ERROR; + goto out; + ret = THERMAL_SUCCESS; +out: nlmsg_free(msg); - return THERMAL_SUCCESS; + return ret; } thermal_error_t thermal_cmd_get_tz(struct thermal_handler *th, struct thermal_zone **tz) { - return thermal_genl_auto(th, -1, THERMAL_GENL_CMD_TZ_GET_ID, + return thermal_genl_auto(th, NULL, NULL, THERMAL_GENL_CMD_TZ_GET_ID, NLM_F_DUMP | NLM_F_ACK, tz); } thermal_error_t thermal_cmd_get_cdev(struct thermal_handler *th, struct thermal_cdev **tc) { - return thermal_genl_auto(th, -1, THERMAL_GENL_CMD_CDEV_GET, + return thermal_genl_auto(th, NULL, NULL, THERMAL_GENL_CMD_CDEV_GET, NLM_F_DUMP | NLM_F_ACK, 
tc); } thermal_error_t thermal_cmd_get_trip(struct thermal_handler *th, struct thermal_zone *tz) { - return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_TRIP, - 0, tz); + struct cmd_param p = { .tz_id = tz->id }; + + return thermal_genl_auto(th, thermal_genl_tz_id_encode, &p, + THERMAL_GENL_CMD_TZ_GET_TRIP, 0, tz); } thermal_error_t thermal_cmd_get_governor(struct thermal_handler *th, struct thermal_zone *tz) { - return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_GOV, 0, tz); + struct cmd_param p = { .tz_id = tz->id }; + + return thermal_genl_auto(th, thermal_genl_tz_id_encode, &p, + THERMAL_GENL_CMD_TZ_GET_GOV, 0, tz); } thermal_error_t thermal_cmd_get_temp(struct thermal_handler *th, struct thermal_zone *tz) { - return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_TEMP, 0, tz); + struct cmd_param p = { .tz_id = tz->id }; + + return thermal_genl_auto(th, thermal_genl_tz_id_encode, &p, + THERMAL_GENL_CMD_TZ_GET_TEMP, 0, tz); } thermal_error_t thermal_cmd_exit(struct thermal_handler *th) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index cb363b507a32..bf75628c5389 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -3083,7 +3083,7 @@ static int handle_insn_ops(struct instruction *insn, if (update_cfi_state(insn, next_insn, &state->cfi, op)) return 1; - if (!insn->alt_group) + if (!opts.uaccess || !insn->alt_group) continue; if (op->dest.type == OP_DEST_PUSHF) { @@ -3368,6 +3368,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, if (!strncmp(func->name, "__cfi_", 6)) return 0; + if (file->ignore_unreachables) + return 0; + WARN("%s() falls through to next function %s()", func->name, insn->func->name); return 1; @@ -3521,14 +3524,20 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, break; case INSN_CONTEXT_SWITCH: - if (func && (!next_insn || !next_insn->hint)) { - WARN_FUNC("unsupported instruction in callable function", - sec, insn->offset); - return 1; + if (func) { + if (!next_insn || !next_insn->hint) { + WARN_FUNC("unsupported instruction in callable function", + sec, insn->offset); + return 1; + } + break; } return 0; case INSN_STAC: + if (!opts.uaccess) + break; + if (state.uaccess) { WARN_FUNC("recursive UACCESS enable", sec, insn->offset); return 1; @@ -3538,6 +3547,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, break; case INSN_CLAC: + if (!opts.uaccess) + break; + if (!state.uaccess && func) { WARN_FUNC("redundant UACCESS disable", sec, insn->offset); return 1; @@ -3579,6 +3591,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, if (!next_insn) { if (state.cfi.cfa.base == CFI_UNDEFINED) return 0; + if (file->ignore_unreachables) + return 0; + WARN("%s: unexpected end of section", sec->name); return 1; } @@ -3728,6 +3743,9 @@ static int validate_entry(struct objtool_file *file, struct instruction *insn) break; } + if (insn->dead_end) + return 0; + if (!next) { WARN_FUNC("teh end!", insn->sec, insn->offset); return -1; @@ -3944,7 +3962,8 @@ static int validate_symbol(struct objtool_file *file, struct section *sec, if (!insn || insn->ignore || insn->visited) return 0; - state->uaccess = sym->uaccess_safe; + if (opts.uaccess) + state->uaccess = sym->uaccess_safe; ret = validate_branch(file, insn->func, insn, *state); if (ret && opts.backtrace) diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c index 1d40f9bcb63b..86597f611900 100644 --- a/tools/perf/builtin-ftrace.c +++ 
b/tools/perf/builtin-ftrace.c @@ -769,7 +769,7 @@ static void display_histogram(int buckets[], bool use_nsec) bar_len = buckets[0] * bar_total / total; printf(" %4d - %-4d %s | %10d | %.*s%*s |\n", - 0, 1, "us", buckets[0], bar_len, bar, bar_total - bar_len, ""); + 0, 1, use_nsec ? "ns" : "us", buckets[0], bar_len, bar, bar_total - bar_len, ""); for (i = 1; i < NUM_BUCKET - 1; i++) { int start = (1 << (i - 1)); diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index bdd8dd54fdb6..b243027bc22d 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -756,15 +756,19 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx) evlist__set_leader(evsel_list); if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) { - if (affinity__setup(&saved_affinity) < 0) - return -1; + if (affinity__setup(&saved_affinity) < 0) { + err = -1; + goto err_out; + } affinity = &saved_affinity; } evlist__for_each_entry(evsel_list, counter) { counter->reset_group = false; - if (bpf_counter__load(counter, &target)) - return -1; + if (bpf_counter__load(counter, &target)) { + err = -1; + goto err_out; + } if (!(evsel__is_bperf(counter))) all_counters_use_bpf = false; } @@ -805,7 +809,8 @@ try_again: switch (stat_handle_error(counter)) { case COUNTER_FATAL: - return -1; + err = -1; + goto err_out; case COUNTER_RETRY: goto try_again; case COUNTER_SKIP: @@ -846,7 +851,8 @@ try_again_reset: switch (stat_handle_error(counter)) { case COUNTER_FATAL: - return -1; + err = -1; + goto err_out; case COUNTER_RETRY: goto try_again_reset; case COUNTER_SKIP: @@ -859,6 +865,7 @@ try_again_reset: } } affinity__cleanup(affinity); + affinity = NULL; evlist__for_each_entry(evsel_list, counter) { if (!counter->supported) { @@ -871,8 +878,10 @@ try_again_reset: stat_config.unit_width = l; if (evsel__should_store_id(counter) && - evsel__store_ids(counter, evsel_list)) - return -1; + evsel__store_ids(counter, evsel_list)) { + err = -1; + goto err_out; + } } if (evlist__apply_filters(evsel_list, &counter)) { @@ -893,20 +902,23 @@ try_again_reset: } if (err < 0) - return err; + goto err_out; err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, process_synthesized_event, is_pipe); if (err < 0) - return err; + goto err_out; + } if (target.initial_delay) { pr_info(EVLIST_DISABLED_MSG); } else { err = enable_counters(); - if (err) - return -1; + if (err) { + err = -1; + goto err_out; + } } /* Exec the command, if any */ @@ -916,8 +928,10 @@ try_again_reset: if (target.initial_delay > 0) { usleep(target.initial_delay * USEC_PER_MSEC); err = enable_counters(); - if (err) - return -1; + if (err) { + err = -1; + goto err_out; + } pr_info(EVLIST_ENABLED_MSG); } @@ -937,7 +951,8 @@ try_again_reset: if (workload_exec_errno) { const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); pr_err("Workload failed: %s\n", emsg); - return -1; + err = -1; + goto err_out; } if (WIFSIGNALED(status)) @@ -986,6 +1001,13 @@ try_again_reset: evlist__close(evsel_list); return WEXITSTATUS(status); + +err_out: + if (forks) + evlist__cancel_workload(evsel_list); + + affinity__cleanup(affinity); + return err; } static int run_perf_stat(int argc, const char **argv, int run_idx) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 93dab6423a04..441655e659c2 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -2385,6 +2385,7 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel, char msg[1024]; void *args, *augmented_args 
= NULL; int augmented_args_size; + size_t printed = 0; if (sc == NULL) return -1; @@ -2400,8 +2401,8 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel, args = perf_evsel__sc_tp_ptr(evsel, args, sample); augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls_args_size); - syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread); - fprintf(trace->output, "%s", msg); + printed += syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread); + fprintf(trace->output, "%.*s", (int)printed, msg); err = 0; out_put: thread__put(thread); @@ -2770,7 +2771,7 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val); } - return printed + fprintf(trace->output, "%s", bf); + return printed + fprintf(trace->output, "%.*s", (int)printed, bf); } static int trace__event_handler(struct trace *trace, struct evsel *evsel, @@ -2779,13 +2780,8 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel, { struct thread *thread; int callchain_ret = 0; - /* - * Check if we called perf_evsel__disable(evsel) due to, for instance, - * this event's max_events having been hit and this is an entry coming - * from the ring buffer that we should discard, since the max events - * have already been considered/printed. - */ - if (evsel->disabled) + + if (evsel->nr_events_printed >= evsel->max_events) return 0; thread = machine__findnew_thread(trace->host, sample->pid, sample->tid); diff --git a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S index 75cf084a927d..577760046772 100644 --- a/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S +++ b/tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S @@ -26,3 +26,5 @@ skip: mov x0, #0 mov x8, #93 // __NR_exit syscall svc #0 + +.section .note.GNU-stack, "", @progbits diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 09e240e4477d..3b54baf79bd4 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -2124,12 +2124,6 @@ static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq) /* Ignore return value */ cs_etm__process_traceid_queue(etmq, tidq); - - /* - * Generate an instruction sample with the remaining - * branchstack entries. 
- */ - cs_etm__flush(etmq, tidq); } } @@ -2226,7 +2220,7 @@ static int cs_etm__process_queues(struct cs_etm_auxtrace *etm) while (1) { if (!etm->heap.heap_cnt) - goto out; + break; /* Take the entry at the top of the min heap */ cs_queue_nr = etm->heap.heap_array[0].queue_nr; @@ -2309,6 +2303,23 @@ refetch: ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp); } + for (i = 0; i < etm->queues.nr_queues; i++) { + struct int_node *inode; + + etmq = etm->queues.queue_array[i].priv; + if (!etmq) + continue; + + intlist__for_each_entry(inode, etmq->traceid_queues_list) { + int idx = (int)(intptr_t)inode->priv; + + /* Flush any remaining branch stack entries */ + tidq = etmq->traceid_queues[idx]; + ret = cs_etm__end_block(etmq, tidq); + if (ret) + return ret; + } + } out: return ret; } diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index ca08e6dc8b23..41bbe6f85b0d 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -1341,19 +1342,18 @@ static int evlist__create_syswide_maps(struct evlist *evlist) */ cpus = perf_cpu_map__new(NULL); if (!cpus) - goto out; + return -ENOMEM; threads = perf_thread_map__new_dummy(); - if (!threads) - goto out_put; + if (!threads) { + perf_cpu_map__put(cpus); + return -ENOMEM; + } perf_evlist__set_maps(&evlist->core, cpus, threads); - perf_thread_map__put(threads); -out_put: perf_cpu_map__put(cpus); -out: - return -ENOMEM; + return 0; } int evlist__open(struct evlist *evlist) @@ -1392,6 +1392,8 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const int child_ready_pipe[2], go_pipe[2]; char bf; + evlist->workload.cork_fd = -1; + if (pipe(child_ready_pipe) < 0) { perror("failed to create 'ready' pipe"); return -1; @@ -1444,7 +1446,7 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const * For cancelling the workload without actually running it, * the parent will just close workload.cork_fd, without writing * anything, i.e. read will return zero and we just exit() - * here. + * here (See evlist__cancel_workload()). 
*/ if (ret != 1) { if (ret == -1) @@ -1508,7 +1510,7 @@ out_close_ready_pipe: int evlist__start_workload(struct evlist *evlist) { - if (evlist->workload.cork_fd > 0) { + if (evlist->workload.cork_fd >= 0) { char bf = 0; int ret; /* @@ -1519,12 +1521,24 @@ int evlist__start_workload(struct evlist *evlist) perror("unable to write to pipe"); close(evlist->workload.cork_fd); + evlist->workload.cork_fd = -1; return ret; } return 0; } +void evlist__cancel_workload(struct evlist *evlist) +{ + int status; + + if (evlist->workload.cork_fd >= 0) { + close(evlist->workload.cork_fd); + evlist->workload.cork_fd = -1; + waitpid(evlist->workload.pid, &status, WNOHANG); + } +} + int evlist__parse_sample(struct evlist *evlist, union perf_event *event, struct perf_sample *sample) { struct evsel *evsel = evlist__event2evsel(evlist, event); diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 16734c6756b3..dc2197069cdc 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -187,6 +187,7 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const char *argv[], bool pipe_output, void (*exec_error)(int signo, siginfo_t *info, void *ucontext)); int evlist__start_workload(struct evlist *evlist); +void evlist__cancel_workload(struct evlist *evlist); struct option; diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 50d861a80f57..3c13597d0a0d 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -1490,6 +1490,10 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, if (ret >= 0 && tf.pf.skip_empty_arg) ret = fill_empty_trace_arg(pev, tf.tevs, tf.ntevs); +#if _ELFUTILS_PREREQ(0, 142) + dwarf_cfi_end(tf.pf.cfi_eh); +#endif + if (ret < 0 || tf.ntevs == 0) { for (i = 0; i < tf.ntevs; i++) clear_probe_trace_event(&tf.tevs[i]); @@ -1732,8 +1736,21 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr, /* Find a corresponding function (name, baseline and baseaddr) */ if (die_find_realfunc(&cudie, (Dwarf_Addr)addr, &spdie)) { - /* Get function entry information */ - func = basefunc = dwarf_diename(&spdie); + /* + * Get function entry information. + * + * As described in the document DWARF Debugging Information + * Format Version 5, section 2.22 Linkage Names, "mangled names, + * are used in various ways, ... to distinguish multiple + * entities that have the same name". + * + * Firstly try to get distinct linkage name, if fail then + * rollback to get associated name in DIE. + */ + func = basefunc = die_get_linkage_name(&spdie); + if (!func) + func = basefunc = dwarf_diename(&spdie); + if (!func || die_entrypc(&spdie, &baseaddr) != 0 || dwarf_decl_line(&spdie, &baseline) != 0) { diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index 8bc1c80d3c1c..1f4650b95509 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -81,9 +81,9 @@ struct probe_finder { /* For variable searching */ #if _ELFUTILS_PREREQ(0, 142) - /* Call Frame Information from .eh_frame */ + /* Call Frame Information from .eh_frame. Owned by this struct. */ Dwarf_CFI *cfi_eh; - /* Call Frame Information from .debug_frame */ + /* Call Frame Information from .debug_frame. Not owned. 
*/ Dwarf_CFI *cfi_dbg; #endif Dwarf_Op *fb_ops; /* Frame base attribute */ diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 5be5fa2391de..e25c3fcaeccc 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -181,7 +181,7 @@ struct pyrf_event { }; #define sample_members \ - sample_member_def(sample_ip, ip, T_ULONGLONG, "event type"), \ + sample_member_def(sample_ip, ip, T_ULONGLONG, "event ip"), \ sample_member_def(sample_pid, pid, T_INT, "event pid"), \ sample_member_def(sample_tid, tid, T_INT, "event tid"), \ sample_member_def(sample_time, time, T_ULONGLONG, "event timestamp"), \ @@ -610,6 +610,11 @@ static PyObject *pyrf_event__new(union perf_event *event) event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)) return NULL; + // FIXME this better be dynamic or we need to parse everything + // before calling perf_mmap__consume(), including tracepoint fields. + if (sizeof(pevent->event) < event->header.size) + return NULL; + ptype = pyrf_event__type[event->header.type]; pevent = PyObject_New(struct pyrf_event, ptype); if (pevent != NULL) @@ -1109,20 +1114,22 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, evsel = evlist__event2evsel(evlist, event); if (!evsel) { + Py_DECREF(pyevent); Py_INCREF(Py_None); return Py_None; } pevent->evsel = evsel; - err = evsel__parse_sample(evsel, event, &pevent->sample); - - /* Consume the even only after we parsed it out. */ perf_mmap__consume(&md->core); - if (err) + err = evsel__parse_sample(evsel, &pevent->event, &pevent->sample); + if (err) { + Py_DECREF(pyevent); return PyErr_Format(PyExc_OSError, "perf: can't parse sample, err=%d", err); + } + return pyevent; } end: diff --git a/tools/perf/util/units.c b/tools/perf/util/units.c index 32c39cfe209b..4c6a86e1cb54 100644 --- a/tools/perf/util/units.c +++ b/tools/perf/util/units.c @@ -64,7 +64,7 @@ unsigned long convert_unit(unsigned long value, char *unit) int unit_number__scnprintf(char *buf, size_t size, u64 n) { - char unit[4] = "BKMG"; + char unit[] = "BKMG"; int i = 0; while (((n / 1024) > 1) && (i < 3)) { diff --git a/tools/power/cpupower/bench/parse.c b/tools/power/cpupower/bench/parse.c index e63dc11fa3a5..48e25be6e163 100644 --- a/tools/power/cpupower/bench/parse.c +++ b/tools/power/cpupower/bench/parse.c @@ -120,6 +120,10 @@ out_dir: struct config *prepare_default_config() { struct config *config = malloc(sizeof(struct config)); + if (!config) { + perror("malloc"); + return NULL; + } dprintf("loading defaults\n"); diff --git a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c index ae6af354a81d..08a399b0be28 100644 --- a/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c +++ b/tools/power/cpupower/utils/idle_monitor/mperf_monitor.c @@ -33,7 +33,7 @@ static int mperf_get_count_percent(unsigned int self_id, double *percent, unsigned int cpu); static int mperf_get_count_freq(unsigned int id, unsigned long long *count, unsigned int cpu); -static struct timespec time_start, time_end; +static struct timespec *time_start, *time_end; static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = { { @@ -174,7 +174,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent, dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n", mperf_cstates[id].name, mperf_diff, tsc_diff); } else if (max_freq_mode == MAX_FREQ_SYSFS) { - timediff = max_frequency * timespec_diff_us(time_start, time_end); + timediff = max_frequency * timespec_diff_us(time_start[cpu], time_end[cpu]); 
*percent = 100.0 * mperf_diff / timediff; dprint("%s: MAXFREQ - mperf_diff: %llu, time_diff: %llu\n", mperf_cstates[id].name, mperf_diff, timediff); @@ -207,7 +207,7 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count, if (max_freq_mode == MAX_FREQ_TSC_REF) { /* Calculate max_freq from TSC count */ tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu]; - time_diff = timespec_diff_us(time_start, time_end); + time_diff = timespec_diff_us(time_start[cpu], time_end[cpu]); max_frequency = tsc_diff / time_diff; } @@ -226,9 +226,8 @@ static int mperf_start(void) { int cpu; - clock_gettime(CLOCK_REALTIME, &time_start); - for (cpu = 0; cpu < cpu_count; cpu++) { + clock_gettime(CLOCK_REALTIME, &time_start[cpu]); mperf_get_tsc(&tsc_at_measure_start[cpu]); mperf_init_stats(cpu); } @@ -243,9 +242,9 @@ static int mperf_stop(void) for (cpu = 0; cpu < cpu_count; cpu++) { mperf_measure_stats(cpu); mperf_get_tsc(&tsc_at_measure_end[cpu]); + clock_gettime(CLOCK_REALTIME, &time_end[cpu]); } - clock_gettime(CLOCK_REALTIME, &time_end); return 0; } @@ -349,6 +348,8 @@ struct cpuidle_monitor *mperf_register(void) aperf_current_count = calloc(cpu_count, sizeof(unsigned long long)); tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long)); tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long)); + time_start = calloc(cpu_count, sizeof(struct timespec)); + time_end = calloc(cpu_count, sizeof(struct timespec)); mperf_monitor.name_len = strlen(mperf_monitor.name); return &mperf_monitor; } @@ -361,6 +362,8 @@ void mperf_unregister(void) free(aperf_current_count); free(tsc_at_measure_start); free(tsc_at_measure_end); + free(time_start); + free(time_end); free(is_valid); } diff --git a/tools/scripts/Makefile.arch b/tools/scripts/Makefile.arch index 0c6c7f456887..a57a9b752b6c 100644 --- a/tools/scripts/Makefile.arch +++ b/tools/scripts/Makefile.arch @@ -7,8 +7,8 @@ HOSTARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \ -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \ -e s/riscv.*/riscv/) -ifndef ARCH -ARCH := $(HOSTARCH) +ifeq ($(strip $(ARCH)),) +override ARCH := $(HOSTARCH) endif SRCARCH := $(ARCH) diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 99e17a0a1364..2109bd42c144 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -2399,6 +2399,11 @@ sub get_version { return if ($have_version); doprint "$make kernelrelease ... "; $version = `$make -s kernelrelease | tail -1`; + if (!length($version)) { + run_command "$make allnoconfig" or return 0; + doprint "$make kernelrelease ... "; + $version = `$make -s kernelrelease | tail -1`; + } chomp($version); doprint "$version\n"; $have_version = 1; @@ -2939,8 +2944,6 @@ sub run_bisect_test { my $failed = 0; my $result; - my $output; - my $ret; $in_bisect = 1; @@ -4279,6 +4282,14 @@ if (defined($opt{"LOG_FILE"})) { if ($opt{"CLEAR_LOG"}) { unlink $opt{"LOG_FILE"}; } + + if (! -e $opt{"LOG_FILE"} && $opt{"LOG_FILE"} =~ m,^(.*/),) { + my $dir = $1; + if (! 
-d $dir) { + mkpath($dir) or die "Failed to create directories '$dir': $!"; + print "\nThe log directory $dir did not exist, so it was created.\n"; + } + } open(LOG, ">> $opt{LOG_FILE}") or die "Can't write to $opt{LOG_FILE}"; LOG->autoflush(1); } diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py index dc7949076863..4a6bf4e048f5 100644 --- a/tools/testing/kunit/qemu_configs/x86_64.py +++ b/tools/testing/kunit/qemu_configs/x86_64.py @@ -7,4 +7,6 @@ CONFIG_SERIAL_8250_CONSOLE=y''', qemu_arch='x86_64', kernel_path='arch/x86/boot/bzImage', kernel_command_line='console=ttyS0', - extra_qemu_params=[]) + # qboot is faster than SeaBIOS and doesn't mess up + # the terminal. + extra_qemu_params=['-bios', 'qboot.rom']) diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c index d587a558997f..11149bd12a1f 100644 --- a/tools/testing/radix-tree/linux.c +++ b/tools/testing/radix-tree/linux.c @@ -121,7 +121,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp) void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list) { if (kmalloc_verbose) - pr_debug("Bulk free %p[0-%lu]\n", list, size - 1); + pr_debug("Bulk free %p[0-%zu]\n", list, size - 1); pthread_mutex_lock(&cachep->lock); for (int i = 0; i < size; i++) @@ -139,7 +139,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size, size_t i; if (kmalloc_verbose) - pr_debug("Bulk alloc %lu\n", size); + pr_debug("Bulk alloc %zu\n", size); if (!(gfp & __GFP_DIRECT_RECLAIM)) { if (cachep->non_kernel < size) diff --git a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c index 2b1425b92b69..a3d1e23fe02a 100644 --- a/tools/testing/selftests/arm64/mte/check_tags_inclusion.c +++ b/tools/testing/selftests/arm64/mte/check_tags_inclusion.c @@ -65,7 +65,7 @@ static int check_single_included_tags(int mem_type, int mode) ptr = mte_insert_tags(ptr, BUFFER_SIZE); /* Check tag value */ if (MT_FETCH_TAG((uintptr_t)ptr) == tag) { - ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n", + ksft_print_msg("FAIL: wrong tag = 0x%lx with include mask=0x%x\n", MT_FETCH_TAG((uintptr_t)ptr), MT_INCLUDE_VALID_TAG(tag)); result = KSFT_FAIL; @@ -97,7 +97,7 @@ static int check_multiple_included_tags(int mem_type, int mode) ptr = mte_insert_tags(ptr, BUFFER_SIZE); /* Check tag value */ if (MT_FETCH_TAG((uintptr_t)ptr) < tag) { - ksft_print_msg("FAIL: wrong tag = 0x%x with include mask=0x%x\n", + ksft_print_msg("FAIL: wrong tag = 0x%lx with include mask=0x%lx\n", MT_FETCH_TAG((uintptr_t)ptr), MT_INCLUDE_VALID_TAGS(excl_mask)); result = KSFT_FAIL; diff --git a/tools/testing/selftests/arm64/mte/mte_common_util.c b/tools/testing/selftests/arm64/mte/mte_common_util.c index 00ffd34c66d3..1120f5aa7655 100644 --- a/tools/testing/selftests/arm64/mte/mte_common_util.c +++ b/tools/testing/selftests/arm64/mte/mte_common_util.c @@ -38,7 +38,7 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc) if (cur_mte_cxt.trig_si_code == si->si_code) cur_mte_cxt.fault_valid = true; else - ksft_print_msg("Got unexpected SEGV_MTEAERR at pc=$lx, fault addr=%lx\n", + ksft_print_msg("Got unexpected SEGV_MTEAERR at pc=%llx, fault addr=%lx\n", ((ucontext_t *)uc)->uc_mcontext.pc, addr); return; @@ -64,7 +64,7 @@ void mte_default_handler(int signum, siginfo_t *si, void *uc) exit(1); } } else if (signum == SIGBUS) { - ksft_print_msg("INFO: SIGBUS signal at pc=%lx, fault addr=%lx, si_code=%lx\n", + 
ksft_print_msg("INFO: SIGBUS signal at pc=%llx, fault addr=%lx, si_code=%x\n", ((ucontext_t *)uc)->uc_mcontext.pc, addr, si->si_code); if ((cur_mte_cxt.trig_range >= 0 && addr >= MT_CLEAR_TAG(cur_mte_cxt.trig_addr) && diff --git a/tools/testing/selftests/arm64/pauth/pac.c b/tools/testing/selftests/arm64/pauth/pac.c index b743daa772f5..5a07b3958fbf 100644 --- a/tools/testing/selftests/arm64/pauth/pac.c +++ b/tools/testing/selftests/arm64/pauth/pac.c @@ -182,6 +182,9 @@ int exec_sign_all(struct signatures *signed_vals, size_t val) return -1; } + close(new_stdin[1]); + close(new_stdout[0]); + return 0; } diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index f882c691b790..535b72623dd4 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __NETWORK_HELPERS_H #define __NETWORK_HELPERS_H +#include #include #include #include @@ -66,4 +67,47 @@ struct nstoken; */ struct nstoken *open_netns(const char *name); void close_netns(struct nstoken *token); + +static __u16 csum_fold(__u32 csum) +{ + csum = (csum & 0xffff) + (csum >> 16); + csum = (csum & 0xffff) + (csum >> 16); + + return (__u16)~csum; +} + +static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, + __wsum csum) +{ + __u64 s = csum; + + s += (__u32)saddr; + s += (__u32)daddr; + s += htons(proto + len); + s = (s & 0xffffffff) + (s >> 32); + s = (s & 0xffffffff) + (s >> 32); + + return csum_fold((__u32)s); +} + +static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, + __wsum csum) +{ + __u64 s = csum; + int i; + + for (i = 0; i < 4; i++) + s += (__u32)saddr->s6_addr32[i]; + for (i = 0; i < 4; i++) + s += (__u32)daddr->s6_addr32[i]; + s += htons(proto + len); + s = (s & 0xffffffff) + (s >> 32); + s = (s & 0xffffffff) + (s >> 32); + + return csum_fold((__u32)s); +} + #endif diff --git a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c index f79815b7e951..fff16cdc93f2 100644 --- a/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c +++ b/tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c @@ -6,6 +6,10 @@ #include #include "bloom_filter_map.skel.h" +#ifndef NUMA_NO_NODE +#define NUMA_NO_NODE (-1) +#endif + static void test_fail_cases(void) { LIBBPF_OPTS(bpf_map_create_opts, opts); @@ -69,6 +73,7 @@ static void test_success_cases(void) /* Create a map */ opts.map_flags = BPF_F_ZERO_SEED | BPF_F_NUMA_NODE; + opts.numa_node = NUMA_NO_NODE; fd = bpf_map_create(BPF_MAP_TYPE_BLOOM_FILTER, NULL, 0, sizeof(value), 100, &opts); if (!ASSERT_GE(fd, 0, "bpf_map_create bloom filter success case")) return; diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c index 2d0796314862..0a99fd404f6d 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c +++ b/tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c @@ -68,7 +68,6 @@ static void test_sockmap_ktls_disconnect_after_delete(int family, int map) goto close_cli; err = disconnect(cli); - ASSERT_OK(err, "disconnect"); close_cli: close(cli); diff --git a/tools/testing/selftests/bpf/progs/strncmp_bench.c b/tools/testing/selftests/bpf/progs/strncmp_bench.c index 18373a7df76e..f47bf88f8d2a 100644 --- a/tools/testing/selftests/bpf/progs/strncmp_bench.c +++ 
b/tools/testing/selftests/bpf/progs/strncmp_bench.c @@ -35,7 +35,10 @@ static __always_inline int local_strncmp(const char *s1, unsigned int sz, SEC("tp/syscalls/sys_enter_getpgid") int strncmp_no_helper(void *ctx) { - if (local_strncmp(str, cmp_str_len + 1, target) < 0) + const char *target_str = target; + + barrier_var(target_str); + if (local_strncmp(str, cmp_str_len + 1, target_str) < 0) __sync_add_and_fetch(&hits, 1); return 0; } diff --git a/tools/testing/selftests/bpf/sdt.h b/tools/testing/selftests/bpf/sdt.h index ca0162b4dc57..1fcfa5160231 100644 --- a/tools/testing/selftests/bpf/sdt.h +++ b/tools/testing/selftests/bpf/sdt.h @@ -102,6 +102,8 @@ # define STAP_SDT_ARG_CONSTRAINT nZr # elif defined __arm__ # define STAP_SDT_ARG_CONSTRAINT g +# elif defined __loongarch__ +# define STAP_SDT_ARG_CONSTRAINT nmr # else # define STAP_SDT_ARG_CONSTRAINT nor # endif diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index 11d4c51c7d21..6068f5bde8fd 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -16,11 +16,13 @@ #include #include +/* backtrace() and backtrace_symbols_fd() are glibc specific, + * use header file when glibc is available and provide stub + * implementations when another libc implementation is used. + */ #ifdef __GLIBC__ #include /* backtrace */ -#endif - -/* Default backtrace funcs if missing at link */ +#else __weak int backtrace(void **buffer, int size) { return 0; @@ -30,6 +32,7 @@ __weak void backtrace_symbols_fd(void *const *buffer, int size, int fd) { dprintf(fd, "\n"); } +#endif /*__GLIBC__ */ static bool verbose(void) { diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 25da05cad8f6..d0284ef699bb 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -56,6 +56,8 @@ static void running_handler(int a); #define BPF_SOCKHASH_FILENAME "test_sockhash_kern.bpf.o" #define CG_PATH "/sockmap" +#define EDATAINTEGRITY 2001 + /* global sockets */ int s1, s2, c1, c2, p1, p2; int test_cnt; @@ -85,6 +87,10 @@ int ktls; int peek_flag; int skb_use_parser; int txmsg_omit_skb_parser; +int verify_push_start; +int verify_push_len; +int verify_pop_start; +int verify_pop_len; static const struct option long_options[] = { {"help", no_argument, NULL, 'h' }, @@ -417,16 +423,18 @@ static int msg_loop_sendpage(int fd, int iov_length, int cnt, { bool drop = opt->drop_expected; unsigned char k = 0; + int i, j, fp; FILE *file; - int i, fp; file = tmpfile(); if (!file) { perror("create file for sendpage"); return 1; } - for (i = 0; i < iov_length * cnt; i++, k++) - fwrite(&k, sizeof(char), 1, file); + for (i = 0; i < cnt; i++, k = 0) { + for (j = 0; j < iov_length; j++, k++) + fwrite(&k, sizeof(char), 1, file); + } fflush(file); fseek(file, 0, SEEK_SET); @@ -509,42 +517,111 @@ unwind_iov: return -ENOMEM; } -static int msg_verify_data(struct msghdr *msg, int size, int chunk_sz) +/* In push or pop test, we need to do some calculations for msg_verify_data */ +static void msg_verify_date_prep(void) { - int i, j = 0, bytes_cnt = 0; - unsigned char k = 0; + int push_range_end = txmsg_start_push + txmsg_end_push - 1; + int pop_range_end = txmsg_start_pop + txmsg_pop - 1; - for (i = 0; i < msg->msg_iovlen; i++) { + if (txmsg_end_push && txmsg_pop && + txmsg_start_push <= pop_range_end && txmsg_start_pop <= push_range_end) { + /* The push range and the pop range overlap */ + int overlap_len; + + 
verify_push_start = txmsg_start_push; + verify_pop_start = txmsg_start_pop; + if (txmsg_start_push < txmsg_start_pop) + overlap_len = min(push_range_end - txmsg_start_pop + 1, txmsg_pop); + else + overlap_len = min(pop_range_end - txmsg_start_push + 1, txmsg_end_push); + verify_push_len = max(txmsg_end_push - overlap_len, 0); + verify_pop_len = max(txmsg_pop - overlap_len, 0); + } else { + /* Otherwise */ + verify_push_start = txmsg_start_push; + verify_pop_start = txmsg_start_pop; + verify_push_len = txmsg_end_push; + verify_pop_len = txmsg_pop; + } +} + +static int msg_verify_data(struct msghdr *msg, int size, int chunk_sz, + unsigned char *k_p, int *bytes_cnt_p, + int *check_cnt_p, int *push_p) +{ + int bytes_cnt = *bytes_cnt_p, check_cnt = *check_cnt_p, push = *push_p; + unsigned char k = *k_p; + int i, j; + + for (i = 0, j = 0; i < msg->msg_iovlen && size; i++, j = 0) { unsigned char *d = msg->msg_iov[i].iov_base; /* Special case test for skb ingress + ktls */ if (i == 0 && txmsg_ktls_skb) { if (msg->msg_iov[i].iov_len < 4) - return -EIO; + return -EDATAINTEGRITY; if (memcmp(d, "PASS", 4) != 0) { fprintf(stderr, "detected skb data error with skb ingress update @iov[%i]:%i \"%02x %02x %02x %02x\" != \"PASS\"\n", i, 0, d[0], d[1], d[2], d[3]); - return -EIO; + return -EDATAINTEGRITY; } j = 4; /* advance index past PASS header */ } for (; j < msg->msg_iov[i].iov_len && size; j++) { + if (push > 0 && + check_cnt == verify_push_start + verify_push_len - push) { + int skipped; +revisit_push: + skipped = push; + if (j + push >= msg->msg_iov[i].iov_len) + skipped = msg->msg_iov[i].iov_len - j; + push -= skipped; + size -= skipped; + j += skipped - 1; + check_cnt += skipped; + continue; + } + + if (verify_pop_len > 0 && check_cnt == verify_pop_start) { + bytes_cnt += verify_pop_len; + check_cnt += verify_pop_len; + k += verify_pop_len; + + if (bytes_cnt == chunk_sz) { + k = 0; + bytes_cnt = 0; + check_cnt = 0; + push = verify_push_len; + } + + if (push > 0 && + check_cnt == verify_push_start + verify_push_len - push) + goto revisit_push; + } + if (d[j] != k++) { fprintf(stderr, "detected data corruption @iov[%i]:%i %02x != %02x, %02x ?= %02x\n", i, j, d[j], k - 1, d[j+1], k); - return -EIO; + return -EDATAINTEGRITY; } bytes_cnt++; + check_cnt++; if (bytes_cnt == chunk_sz) { k = 0; bytes_cnt = 0; + check_cnt = 0; + push = verify_push_len; } size--; } } + *k_p = k; + *bytes_cnt_p = bytes_cnt; + *check_cnt_p = check_cnt; + *push_p = push; return 0; } @@ -597,10 +674,14 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, } clock_gettime(CLOCK_MONOTONIC, &s->end); } else { + float total_bytes, txmsg_pop_total, txmsg_push_total; int slct, recvp = 0, recv, max_fd = fd; - float total_bytes, txmsg_pop_total; int fd_flags = O_NONBLOCK; struct timeval timeout; + unsigned char k = 0; + int bytes_cnt = 0; + int check_cnt = 0; + int push = 0; fd_set w; fcntl(fd, fd_flags); @@ -614,12 +695,22 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, * This is really only useful for testing edge cases in code * paths. 
*/ - total_bytes = (float)iov_count * (float)iov_length * (float)cnt; - if (txmsg_apply) + total_bytes = (float)iov_length * (float)cnt; + if (!opt->sendpage) + total_bytes *= (float)iov_count; + if (txmsg_apply) { + txmsg_push_total = txmsg_end_push * (total_bytes / txmsg_apply); txmsg_pop_total = txmsg_pop * (total_bytes / txmsg_apply); - else + } else { + txmsg_push_total = txmsg_end_push * cnt; txmsg_pop_total = txmsg_pop * cnt; + } + total_bytes += txmsg_push_total; total_bytes -= txmsg_pop_total; + if (data) { + msg_verify_date_prep(); + push = verify_push_len; + } err = clock_gettime(CLOCK_MONOTONIC, &s->start); if (err < 0) perror("recv start time"); @@ -692,10 +783,11 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, if (data) { int chunk_sz = opt->sendpage ? - iov_length * cnt : + iov_length : iov_length * iov_count; - errno = msg_verify_data(&msg, recv, chunk_sz); + errno = msg_verify_data(&msg, recv, chunk_sz, &k, &bytes_cnt, + &check_cnt, &push); if (errno) { perror("data verify msg failed"); goto out_errno; @@ -703,7 +795,11 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, if (recvp) { errno = msg_verify_data(&msg_peek, recvp, - chunk_sz); + chunk_sz, + &k, + &bytes_cnt, + &check_cnt, + &push); if (errno) { perror("data verify msg_peek failed"); goto out_errno; @@ -785,8 +881,6 @@ static int sendmsg_test(struct sockmap_options *opt) rxpid = fork(); if (rxpid == 0) { - if (txmsg_pop || txmsg_start_pop) - iov_buf -= (txmsg_pop - txmsg_start_pop + 1); if (opt->drop_expected || txmsg_ktls_skb_drop) _exit(0); @@ -811,7 +905,7 @@ static int sendmsg_test(struct sockmap_options *opt) s.bytes_sent, sent_Bps, sent_Bps/giga, s.bytes_recvd, recvd_Bps, recvd_Bps/giga, peek_flag ? "(peek_msg)" : ""); - if (err && txmsg_cork) + if (err && err != -EDATAINTEGRITY && txmsg_cork) err = 0; exit(err ? 1 : 0); } else if (rxpid == -1) { @@ -1459,8 +1553,8 @@ static void test_send_many(struct sockmap_options *opt, int cgrp) static void test_send_large(struct sockmap_options *opt, int cgrp) { - opt->iov_length = 256; - opt->iov_count = 1024; + opt->iov_length = 8192; + opt->iov_count = 32; opt->rate = 2; test_exec(cgrp, opt); } @@ -1589,17 +1683,19 @@ static void test_txmsg_cork_hangs(int cgrp, struct sockmap_options *opt) static void test_txmsg_pull(int cgrp, struct sockmap_options *opt) { /* Test basic start/end */ + txmsg_pass = 1; txmsg_start = 1; txmsg_end = 2; test_send(opt, cgrp); /* Test >4k pull */ + txmsg_pass = 1; txmsg_start = 4096; txmsg_end = 9182; test_send_large(opt, cgrp); /* Test pull + redirect */ - txmsg_redir = 0; + txmsg_redir = 1; txmsg_start = 1; txmsg_end = 2; test_send(opt, cgrp); @@ -1621,12 +1717,16 @@ static void test_txmsg_pull(int cgrp, struct sockmap_options *opt) static void test_txmsg_pop(int cgrp, struct sockmap_options *opt) { + bool data = opt->data_test; + /* Test basic pop */ + txmsg_pass = 1; txmsg_start_pop = 1; txmsg_pop = 2; test_send_many(opt, cgrp); /* Test pop with >4k */ + txmsg_pass = 1; txmsg_start_pop = 4096; txmsg_pop = 4096; test_send_large(opt, cgrp); @@ -1637,6 +1737,12 @@ static void test_txmsg_pop(int cgrp, struct sockmap_options *opt) txmsg_pop = 2; test_send_many(opt, cgrp); + /* TODO: Test for pop + cork should be different, + * - It makes the layout of the received data difficult + * - It makes it hard to calculate the total_bytes in the recvmsg + * Temporarily skip the data integrity test for this case now. 
+ */ + opt->data_test = false; /* Test pop + cork */ txmsg_redir = 0; txmsg_cork = 512; @@ -1650,16 +1756,21 @@ static void test_txmsg_pop(int cgrp, struct sockmap_options *opt) txmsg_start_pop = 1; txmsg_pop = 2; test_send_many(opt, cgrp); + opt->data_test = data; } static void test_txmsg_push(int cgrp, struct sockmap_options *opt) { + bool data = opt->data_test; + /* Test basic push */ + txmsg_pass = 1; txmsg_start_push = 1; txmsg_end_push = 1; test_send(opt, cgrp); /* Test push 4kB >4k */ + txmsg_pass = 1; txmsg_start_push = 4096; txmsg_end_push = 4096; test_send_large(opt, cgrp); @@ -1670,16 +1781,24 @@ static void test_txmsg_push(int cgrp, struct sockmap_options *opt) txmsg_end_push = 2; test_send_many(opt, cgrp); + /* TODO: Test for push + cork should be different, + * - It makes the layout of the received data difficult + * - It makes it hard to calculate the total_bytes in the recvmsg + * Temporarily skip the data integrity test for this case now. + */ + opt->data_test = false; /* Test push + cork */ txmsg_redir = 0; txmsg_cork = 512; txmsg_start_push = 1; txmsg_end_push = 2; test_send_many(opt, cgrp); + opt->data_test = data; } static void test_txmsg_push_pop(int cgrp, struct sockmap_options *opt) { + txmsg_pass = 1; txmsg_start_push = 1; txmsg_end_push = 10; txmsg_start_pop = 5; diff --git a/tools/testing/selftests/bpf/test_tc_tunnel.sh b/tools/testing/selftests/bpf/test_tc_tunnel.sh index 365a2c7a89ba..8766e88b5a40 100755 --- a/tools/testing/selftests/bpf/test_tc_tunnel.sh +++ b/tools/testing/selftests/bpf/test_tc_tunnel.sh @@ -296,6 +296,7 @@ else client_connect verify_data server_listen + wait_for_port ${port} ${netcat_opt} fi # bpf_skb_net_shrink does not take tunnel flags yet, cannot update L3. diff --git a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh index 0c47faff9274..c068e6c2a580 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh @@ -22,20 +22,34 @@ SB_ITC=0 h1_create() { simple_if_init $h1 192.0.1.1/24 + tc qdisc add dev $h1 clsact + + # Add egress filter on $h1 that will guarantee that the packet sent, + # will be the only packet being passed to the device. + tc filter add dev $h1 egress pref 2 handle 102 matchall action drop } h1_destroy() { + tc filter del dev $h1 egress pref 2 handle 102 matchall action drop + tc qdisc del dev $h1 clsact simple_if_fini $h1 192.0.1.1/24 } h2_create() { simple_if_init $h2 192.0.1.2/24 + tc qdisc add dev $h2 clsact + + # Add egress filter on $h2 that will guarantee that the packet sent, + # will be the only packet being passed to the device. + tc filter add dev $h2 egress pref 1 handle 101 matchall action drop } h2_destroy() { + tc filter del dev $h2 egress pref 1 handle 101 matchall action drop + tc qdisc del dev $h2 clsact simple_if_fini $h2 192.0.1.2/24 } @@ -101,6 +115,11 @@ port_pool_test() local exp_max_occ=$(devlink_cell_size_get) local max_occ + tc filter add dev $h1 egress protocol ip pref 1 handle 101 flower \ + src_mac $h1mac dst_mac $h2mac \ + src_ip 192.0.1.1 dst_ip 192.0.1.2 \ + action pass + devlink sb occupancy clearmax $DEVLINK_DEV $MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ @@ -108,11 +127,6 @@ port_pool_test() devlink sb occupancy snapshot $DEVLINK_DEV - RET=0 - max_occ=$(sb_occ_pool_check $dl_port1 $SB_POOL_ING $exp_max_occ) - check_err $? 
"Expected iPool($SB_POOL_ING) max occupancy to be $exp_max_occ, but got $max_occ" - log_test "physical port's($h1) ingress pool" - RET=0 max_occ=$(sb_occ_pool_check $dl_port2 $SB_POOL_ING $exp_max_occ) check_err $? "Expected iPool($SB_POOL_ING) max occupancy to be $exp_max_occ, but got $max_occ" @@ -122,6 +136,11 @@ port_pool_test() max_occ=$(sb_occ_pool_check $cpu_dl_port $SB_POOL_EGR_CPU $exp_max_occ) check_err $? "Expected ePool($SB_POOL_EGR_CPU) max occupancy to be $exp_max_occ, but got $max_occ" log_test "CPU port's egress pool" + + tc filter del dev $h1 egress protocol ip pref 1 handle 101 flower \ + src_mac $h1mac dst_mac $h2mac \ + src_ip 192.0.1.1 dst_ip 192.0.1.2 \ + action pass } port_tc_ip_test() @@ -129,6 +148,11 @@ port_tc_ip_test() local exp_max_occ=$(devlink_cell_size_get) local max_occ + tc filter add dev $h1 egress protocol ip pref 1 handle 101 flower \ + src_mac $h1mac dst_mac $h2mac \ + src_ip 192.0.1.1 dst_ip 192.0.1.2 \ + action pass + devlink sb occupancy clearmax $DEVLINK_DEV $MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ @@ -136,11 +160,6 @@ port_tc_ip_test() devlink sb occupancy snapshot $DEVLINK_DEV - RET=0 - max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ) - check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ" - log_test "physical port's($h1) ingress TC - IP packet" - RET=0 max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ) check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ" @@ -150,6 +169,11 @@ port_tc_ip_test() max_occ=$(sb_occ_etc_check $cpu_dl_port $SB_ITC_CPU_IP $exp_max_occ) check_err $? "Expected egress TC($SB_ITC_CPU_IP) max occupancy to be $exp_max_occ, but got $max_occ" log_test "CPU port's egress TC - IP packet" + + tc filter del dev $h1 egress protocol ip pref 1 handle 101 flower \ + src_mac $h1mac dst_mac $h2mac \ + src_ip 192.0.1.1 dst_ip 192.0.1.2 \ + action pass } port_tc_arp_test() @@ -157,17 +181,15 @@ port_tc_arp_test() local exp_max_occ=$(devlink_cell_size_get) local max_occ + tc filter add dev $h1 egress protocol arp pref 1 handle 101 flower \ + src_mac $h1mac action pass + devlink sb occupancy clearmax $DEVLINK_DEV $MZ $h1 -c 1 -p 10 -a $h1mac -A 192.0.1.1 -t arp -q devlink sb occupancy snapshot $DEVLINK_DEV - RET=0 - max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ) - check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ" - log_test "physical port's($h1) ingress TC - ARP packet" - RET=0 max_occ=$(sb_occ_itc_check $dl_port2 $SB_ITC $exp_max_occ) check_err $? "Expected ingress TC($SB_ITC) max occupancy to be $exp_max_occ, but got $max_occ" @@ -177,6 +199,9 @@ port_tc_arp_test() max_occ=$(sb_occ_etc_check $cpu_dl_port $SB_ITC_CPU_ARP $exp_max_occ) check_err $? 
"Expected egress TC($SB_ITC_IP2ME) max occupancy to be $exp_max_occ, but got $max_occ" log_test "CPU port's egress TC - ARP packet" + + tc filter del dev $h1 egress protocol arp pref 1 handle 101 flower \ + src_mac $h1mac action pass } setup_prepare() diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh index 185b02d2d4cd..7af78990b5bb 100755 --- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh +++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh @@ -142,7 +142,7 @@ function pre_ethtool { } function check_table { - local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1 + local path=$NSIM_DEV_DFS/ports/$port/udp_ports/table$1 local -n expected=$2 local last=$3 @@ -212,7 +212,7 @@ function check_tables { } function print_table { - local path=$NSIM_DEV_DFS/ports/$port/udp_ports_table$1 + local path=$NSIM_DEV_DFS/ports/$port/udp_ports/table$1 read -a have < $path tree $NSIM_DEV_DFS/ @@ -640,7 +640,7 @@ for port in 0 1; do NSIM_NETDEV=`get_netdev_name old_netdevs` ifconfig $NSIM_NETDEV up - echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error + echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports/inject_error msg="1 - create VxLANs v6" exp0=( 0 0 0 0 ) @@ -662,7 +662,7 @@ for port in 0 1; do new_geneve gnv0 20000 msg="2 - destroy GENEVE" - echo 2 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error + echo 2 > $NSIM_DEV_DFS/ports/$port/udp_ports/inject_error exp1=( `mke 20000 2` 0 0 0 ) del_dev gnv0 @@ -763,7 +763,7 @@ for port in 0 1; do msg="create VxLANs v4" new_vxlan vxlan0 10000 $NSIM_NETDEV - echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset + echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset check_tables msg="NIC device goes down" @@ -774,7 +774,7 @@ for port in 0 1; do fi check_tables - echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset + echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset check_tables msg="NIC device goes up again" @@ -788,7 +788,7 @@ for port in 0 1; do del_dev vxlan0 check_tables - echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset + echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset check_tables msg="destroy NIC" @@ -895,7 +895,7 @@ msg="vacate VxLAN in overflow table" exp0=( `mke 10000 1` `mke 10004 1` 0 `mke 10003 1` ) del_dev vxlan2 -echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset +echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports/reset check_tables msg="tunnels destroyed 2" diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile index a0b8688b0836..a705493c04bb 100644 --- a/tools/testing/selftests/exec/Makefile +++ b/tools/testing/selftests/exec/Makefile @@ -3,8 +3,13 @@ CFLAGS = -Wall CFLAGS += -Wno-nonnull CFLAGS += -D_GNU_SOURCE +ALIGNS := 0x1000 0x200000 0x1000000 +ALIGN_PIES := $(patsubst %,load_address.%,$(ALIGNS)) +ALIGN_STATIC_PIES := $(patsubst %,load_address.static.%,$(ALIGNS)) +ALIGNMENT_TESTS := $(ALIGN_PIES) $(ALIGN_STATIC_PIES) + TEST_PROGS := binfmt_script.py -TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 non-regular +TEST_GEN_PROGS := execveat non-regular $(ALIGNMENT_TESTS) TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir # Makefile is a run-time dependency, since it's accessed by the execveat test TEST_FILES := Makefile @@ -28,9 +33,9 @@ $(OUTPUT)/execveat.symlink: $(OUTPUT)/execveat $(OUTPUT)/execveat.denatured: $(OUTPUT)/execveat cp $< $@ chmod -x $@ -$(OUTPUT)/load_address_4096: load_address.c - $(CC) $(CFLAGS) $(LDFLAGS) 
-Wl,-z,max-page-size=0x1000 -pie -static $< -o $@ -$(OUTPUT)/load_address_2097152: load_address.c - $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x200000 -pie -static $< -o $@ -$(OUTPUT)/load_address_16777216: load_address.c - $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=0x1000000 -pie -static $< -o $@ +$(OUTPUT)/load_address.0x%: load_address.c + $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \ + -fPIE -pie $< -o $@ +$(OUTPUT)/load_address.static.0x%: load_address.c + $(CC) $(CFLAGS) $(LDFLAGS) -Wl,-z,max-page-size=$(lastword $(subst ., ,$@)) \ + -fPIE -static-pie $< -o $@ diff --git a/tools/testing/selftests/exec/load_address.c b/tools/testing/selftests/exec/load_address.c index d487c2f6a615..8257fddba8c8 100644 --- a/tools/testing/selftests/exec/load_address.c +++ b/tools/testing/selftests/exec/load_address.c @@ -5,10 +5,13 @@ #include #include #include +#include +#include "../kselftest.h" struct Statistics { unsigned long long load_address; unsigned long long alignment; + bool interp; }; int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data) @@ -25,11 +28,20 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data) stats->alignment = 0; for (i = 0; i < info->dlpi_phnum; i++) { + unsigned long long align; + + if (info->dlpi_phdr[i].p_type == PT_INTERP) { + stats->interp = true; + continue; + } + if (info->dlpi_phdr[i].p_type != PT_LOAD) continue; - if (info->dlpi_phdr[i].p_align > stats->alignment) - stats->alignment = info->dlpi_phdr[i].p_align; + align = info->dlpi_phdr[i].p_align; + + if (align > stats->alignment) + stats->alignment = align; } return 1; // Terminate dl_iterate_phdr. @@ -37,32 +49,57 @@ int ExtractStatistics(struct dl_phdr_info *info, size_t size, void *data) int main(int argc, char **argv) { - struct Statistics extracted; - unsigned long long misalign; + struct Statistics extracted = { }; + unsigned long long misalign, pow2; + bool interp_needed; + char buf[1024]; + FILE *maps; int ret; + ksft_print_header(); + ksft_set_plan(4); + + /* Dump maps file for debugging reference. */ + maps = fopen("/proc/self/maps", "r"); + if (!maps) + ksft_exit_fail_msg("FAILED: /proc/self/maps: %s\n", strerror(errno)); + while (fgets(buf, sizeof(buf), maps)) { + ksft_print_msg("%s", buf); + } + fclose(maps); + + /* Walk the program headers. */ ret = dl_iterate_phdr(ExtractStatistics, &extracted); - if (ret != 1) { - fprintf(stderr, "FAILED\n"); - return 1; - } + if (ret != 1) + ksft_exit_fail_msg("FAILED: dl_iterate_phdr\n"); - if (extracted.alignment == 0) { - fprintf(stderr, "No alignment found\n"); - return 1; - } else if (extracted.alignment & (extracted.alignment - 1)) { - fprintf(stderr, "Alignment is not a power of 2\n"); - return 1; - } + /* Report our findings. */ + ksft_print_msg("load_address=%#llx alignment=%#llx\n", + extracted.load_address, extracted.alignment); + /* If we're named with ".static." we expect no INTERP. */ + interp_needed = strstr(argv[0], ".static.") == NULL; + + /* Were we built as expected? */ + ksft_test_result(interp_needed == extracted.interp, + "%s INTERP program header %s\n", + interp_needed ? "Wanted" : "Unwanted", + extracted.interp ? "seen" : "missing"); + + /* Did we find an alignment? */ + ksft_test_result(extracted.alignment != 0, + "Alignment%s found\n", extracted.alignment ? "" : " NOT"); + + /* Is the alignment sane? */ + pow2 = extracted.alignment & (extracted.alignment - 1); + ksft_test_result(pow2 == 0, + "Alignment is%s a power of 2: %#llx\n", + pow2 == 0 ? 
"" : " NOT", extracted.alignment); + + /* Is the load address aligned? */ misalign = extracted.load_address & (extracted.alignment - 1); - if (misalign) { - printf("alignment = %llu, load_address = %llu\n", - extracted.alignment, extracted.load_address); - fprintf(stderr, "FAILED\n"); - return 1; - } + ksft_test_result(misalign == 0, "Load Address is %saligned (%#llx)\n", + misalign ? "MIS" : "", misalign); - fprintf(stderr, "PASS\n"); - return 0; + ksft_finished(); } diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c index 7d7a6a06cdb7..2d8230da9064 100644 --- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c +++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c @@ -98,7 +98,7 @@ int main(int argc, char *argv[]) info("Calling futex_waitv on f1: %u @ %p with val=%u\n", f1, &f1, f1+1); res = futex_waitv(&waitv, 1, 0, &to, CLOCK_MONOTONIC); if (!res || errno != EWOULDBLOCK) { - ksft_test_result_pass("futex_waitv returned: %d %s\n", + ksft_test_result_fail("futex_waitv returned: %d %s\n", res ? errno : res, res ? strerror(errno) : ""); ret = RET_FAIL; diff --git a/tools/testing/selftests/gpio/gpio-sim.sh b/tools/testing/selftests/gpio/gpio-sim.sh index bf67b23ed29a..46101a800beb 100755 --- a/tools/testing/selftests/gpio/gpio-sim.sh +++ b/tools/testing/selftests/gpio/gpio-sim.sh @@ -46,12 +46,6 @@ remove_chip() { rmdir $CONFIGFS_DIR/$CHIP || fail "Unable to remove the chip" } -configfs_cleanup() { - for CHIP in `ls $CONFIGFS_DIR/`; do - remove_chip $CHIP - done -} - create_chip() { local CHIP=$1 @@ -105,6 +99,13 @@ disable_chip() { echo 0 > $CONFIGFS_DIR/$CHIP/live || fail "Unable to disable the chip" } +configfs_cleanup() { + for CHIP in `ls $CONFIGFS_DIR/`; do + disable_chip $CHIP + remove_chip $CHIP + done +} + configfs_chip_name() { local CHIP=$1 local BANK=$2 @@ -181,6 +182,7 @@ create_chip chip create_bank chip bank enable_chip chip test -n `cat $CONFIGFS_DIR/chip/bank/chip_name` || fail "chip_name doesn't work" +disable_chip chip remove_chip chip echo "1.2. chip_name returns 'none' if the chip is still pending" @@ -195,6 +197,7 @@ create_chip chip create_bank chip bank enable_chip chip test -n `cat $CONFIGFS_DIR/chip/dev_name` || fail "dev_name doesn't work" +disable_chip chip remove_chip chip echo "2. Creating and configuring simulated chips" @@ -204,6 +207,7 @@ create_chip chip create_bank chip bank enable_chip chip test "`get_chip_num_lines chip bank`" = "1" || fail "default number of lines is not 1" +disable_chip chip remove_chip chip echo "2.2. Number of lines can be specified" @@ -212,6 +216,7 @@ create_bank chip bank set_num_lines chip bank 16 enable_chip chip test "`get_chip_num_lines chip bank`" = "16" || fail "number of lines is not 16" +disable_chip chip remove_chip chip echo "2.3. Label can be set" @@ -220,6 +225,7 @@ create_bank chip bank set_label chip bank foobar enable_chip chip test "`get_chip_label chip bank`" = "foobar" || fail "label is incorrect" +disable_chip chip remove_chip chip echo "2.4. Label can be left empty" @@ -227,6 +233,7 @@ create_chip chip create_bank chip bank enable_chip chip test -z "`cat $CONFIGFS_DIR/chip/bank/label`" || fail "label is not empty" +disable_chip chip remove_chip chip echo "2.5. 
Line names can be configured" @@ -238,6 +245,7 @@ set_line_name chip bank 2 bar enable_chip chip test "`get_line_name chip bank 0`" = "foo" || fail "line name is incorrect" test "`get_line_name chip bank 2`" = "bar" || fail "line name is incorrect" +disable_chip chip remove_chip chip echo "2.6. Line config can remain unused if offset is greater than number of lines" @@ -248,6 +256,7 @@ set_line_name chip bank 5 foobar enable_chip chip test "`get_line_name chip bank 0`" = "" || fail "line name is incorrect" test "`get_line_name chip bank 1`" = "" || fail "line name is incorrect" +disable_chip chip remove_chip chip echo "2.7. Line configfs directory names are sanitized" @@ -267,6 +276,7 @@ for CHIP in $CHIPS; do enable_chip $CHIP done for CHIP in $CHIPS; do + disable_chip $CHIP remove_chip $CHIP done @@ -278,6 +288,7 @@ echo foobar > $CONFIGFS_DIR/chip/bank/label 2> /dev/null && \ fail "Setting label of a live chip should fail" echo 8 > $CONFIGFS_DIR/chip/bank/num_lines 2> /dev/null && \ fail "Setting number of lines of a live chip should fail" +disable_chip chip remove_chip chip echo "2.10. Can't create line items when chip is live" @@ -285,6 +296,7 @@ create_chip chip create_bank chip bank enable_chip chip mkdir $CONFIGFS_DIR/chip/bank/line0 2> /dev/null && fail "Creating line item should fail" +disable_chip chip remove_chip chip echo "2.11. Probe errors are propagated to user-space" @@ -316,6 +328,7 @@ mkdir -p $CONFIGFS_DIR/chip/bank/line4/hog enable_chip chip $BASE_DIR/gpio-mockup-cdev -s 1 /dev/`configfs_chip_name chip bank` 4 2> /dev/null && \ fail "Setting the value of a hogged line shouldn't succeed" +disable_chip chip remove_chip chip echo "3. Controlling simulated chips" @@ -331,6 +344,7 @@ test "$?" = "1" || fail "pull set incorrectly" sysfs_set_pull chip bank 0 pull-down $BASE_DIR/gpio-mockup-cdev /dev/`configfs_chip_name chip bank` 1 test "$?" = "0" || fail "pull set incorrectly" +disable_chip chip remove_chip chip echo "3.2. Pull can be read from sysfs" @@ -344,6 +358,7 @@ SYSFS_PATH=/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/pull test `cat $SYSFS_PATH` = "pull-down" || fail "reading the pull failed" sysfs_set_pull chip bank 0 pull-up test `cat $SYSFS_PATH` = "pull-up" || fail "reading the pull failed" +disable_chip chip remove_chip chip echo "3.3. Incorrect input in sysfs is rejected" @@ -355,6 +370,7 @@ DEVNAME=`configfs_dev_name chip` CHIPNAME=`configfs_chip_name chip bank` SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/pull" echo foobar > $SYSFS_PATH 2> /dev/null && fail "invalid input not detected" +disable_chip chip remove_chip chip echo "3.4. Can't write to value" @@ -365,6 +381,7 @@ DEVNAME=`configfs_dev_name chip` CHIPNAME=`configfs_chip_name chip bank` SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/value" echo 1 > $SYSFS_PATH 2> /dev/null && fail "writing to 'value' succeeded unexpectedly" +disable_chip chip remove_chip chip echo "4. Simulated GPIO chips are functional" @@ -382,6 +399,7 @@ $BASE_DIR/gpio-mockup-cdev -s 1 /dev/`configfs_chip_name chip bank` 0 & sleep 0.1 # FIXME Any better way? test `cat $SYSFS_PATH` = "1" || fail "incorrect value read from sysfs" kill $! +disable_chip chip remove_chip chip echo "4.2. 
Bias settings work correctly" @@ -394,6 +412,7 @@ CHIPNAME=`configfs_chip_name chip bank` SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/value" $BASE_DIR/gpio-mockup-cdev -b pull-up /dev/`configfs_chip_name chip bank` 0 test `cat $SYSFS_PATH` = "1" || fail "bias setting does not work" +disable_chip chip remove_chip chip echo "GPIO $MODULE test PASS" diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h index 584687c3286d..9d1379da59df 100644 --- a/tools/testing/selftests/kselftest_harness.h +++ b/tools/testing/selftests/kselftest_harness.h @@ -709,33 +709,33 @@ /* Report with actual signedness to avoid weird output. */ \ switch (is_signed_type(__exp) * 2 + is_signed_type(__seen)) { \ case 0: { \ - unsigned long long __exp_print = (uintptr_t)__exp; \ - unsigned long long __seen_print = (uintptr_t)__seen; \ - __TH_LOG("Expected %s (%llu) %s %s (%llu)", \ + uintmax_t __exp_print = (uintmax_t)__exp; \ + uintmax_t __seen_print = (uintmax_t)__seen; \ + __TH_LOG("Expected %s (%ju) %s %s (%ju)", \ _expected_str, __exp_print, #_t, \ _seen_str, __seen_print); \ break; \ } \ case 1: { \ - unsigned long long __exp_print = (uintptr_t)__exp; \ - long long __seen_print = (intptr_t)__seen; \ - __TH_LOG("Expected %s (%llu) %s %s (%lld)", \ + uintmax_t __exp_print = (uintmax_t)__exp; \ + intmax_t __seen_print = (intmax_t)__seen; \ + __TH_LOG("Expected %s (%ju) %s %s (%jd)", \ _expected_str, __exp_print, #_t, \ _seen_str, __seen_print); \ break; \ } \ case 2: { \ - long long __exp_print = (intptr_t)__exp; \ - unsigned long long __seen_print = (uintptr_t)__seen; \ - __TH_LOG("Expected %s (%lld) %s %s (%llu)", \ + intmax_t __exp_print = (intmax_t)__exp; \ + uintmax_t __seen_print = (uintmax_t)__seen; \ + __TH_LOG("Expected %s (%jd) %s %s (%ju)", \ _expected_str, __exp_print, #_t, \ _seen_str, __seen_print); \ break; \ } \ case 3: { \ - long long __exp_print = (intptr_t)__exp; \ - long long __seen_print = (intptr_t)__seen; \ - __TH_LOG("Expected %s (%lld) %s %s (%lld)", \ + intmax_t __exp_print = (intmax_t)__exp; \ + intmax_t __seen_print = (intmax_t)__seen; \ + __TH_LOG("Expected %s (%jd) %s %s (%jd)", \ _expected_str, __exp_print, #_t, \ _seen_str, __seen_print); \ break; \ diff --git a/tools/testing/selftests/landlock/base_test.c b/tools/testing/selftests/landlock/base_test.c index e43831746146..ea970679c5d9 100644 --- a/tools/testing/selftests/landlock/base_test.c +++ b/tools/testing/selftests/landlock/base_test.c @@ -98,10 +98,54 @@ TEST(abi_version) ASSERT_EQ(EINVAL, errno); } +/* + * Old source trees might not have the set of Kselftest fixes related to kernel + * UAPI headers. + */ +#ifndef LANDLOCK_CREATE_RULESET_ERRATA +#define LANDLOCK_CREATE_RULESET_ERRATA (1U << 1) +#endif + +TEST(errata) +{ + const struct landlock_ruleset_attr ruleset_attr = { + .handled_access_fs = LANDLOCK_ACCESS_FS_READ_FILE, + }; + int errata; + + errata = landlock_create_ruleset(NULL, 0, + LANDLOCK_CREATE_RULESET_ERRATA); + /* The errata bitmask will not be backported to tests. 
*/ + ASSERT_LE(0, errata); + TH_LOG("errata: 0x%x", errata); + + ASSERT_EQ(-1, landlock_create_ruleset(&ruleset_attr, 0, + LANDLOCK_CREATE_RULESET_ERRATA)); + ASSERT_EQ(EINVAL, errno); + + ASSERT_EQ(-1, landlock_create_ruleset(NULL, sizeof(ruleset_attr), + LANDLOCK_CREATE_RULESET_ERRATA)); + ASSERT_EQ(EINVAL, errno); + + ASSERT_EQ(-1, + landlock_create_ruleset(&ruleset_attr, sizeof(ruleset_attr), + LANDLOCK_CREATE_RULESET_ERRATA)); + ASSERT_EQ(EINVAL, errno); + + ASSERT_EQ(-1, landlock_create_ruleset( + NULL, 0, + LANDLOCK_CREATE_RULESET_VERSION | + LANDLOCK_CREATE_RULESET_ERRATA)); + ASSERT_EQ(-1, landlock_create_ruleset(NULL, 0, + LANDLOCK_CREATE_RULESET_ERRATA | + 1 << 31)); + ASSERT_EQ(EINVAL, errno); +} + /* Tests ordering of syscall argument checks. */ TEST(create_ruleset_checks_ordering) { - const int last_flag = LANDLOCK_CREATE_RULESET_VERSION; + const int last_flag = LANDLOCK_CREATE_RULESET_ERRATA; const int invalid_flag = last_flag << 1; int ruleset_fd; const struct landlock_ruleset_attr ruleset_attr = { diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c index f2c3bffa6ea5..864309ebb0b4 100644 --- a/tools/testing/selftests/landlock/fs_test.c +++ b/tools/testing/selftests/landlock/fs_test.c @@ -1775,8 +1775,7 @@ static void test_execute(struct __test_metadata *const _metadata, const int err, ASSERT_EQ(1, WIFEXITED(status)); ASSERT_EQ(err ? 2 : 0, WEXITSTATUS(status)) { - TH_LOG("Unexpected return code for \"%s\": %s", path, - strerror(errno)); + TH_LOG("Unexpected return code for \"%s\"", path); }; } diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c index 4c88238fc8f0..c0ae86c28d7f 100644 --- a/tools/testing/selftests/mincore/mincore_selftest.c +++ b/tools/testing/selftests/mincore/mincore_selftest.c @@ -261,9 +261,6 @@ TEST(check_file_mmap) TH_LOG("No read-ahead pages found in memory"); } - EXPECT_LT(i, vec_size) { - TH_LOG("Read-ahead pages reached the end of the file"); - } /* * End of the readahead window. The rest of the pages shouldn't * be in memory. 
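
The Landlock errata test above relies on a query convention: calling landlock_create_ruleset() with a NULL attribute pointer, a zero size, and exactly one of LANDLOCK_CREATE_RULESET_VERSION or LANDLOCK_CREATE_RULESET_ERRATA returns, respectively, the ABI version or the errata bitmask, while any other combination fails with EINVAL. Below is a minimal standalone sketch of that probe (not part of the selftest); it assumes a libc/UAPI combination that provides __NR_landlock_create_ruleset and LANDLOCK_CREATE_RULESET_VERSION, and it reuses the same errata-flag fallback as base_test.c for older headers.

/*
 * Hypothetical userspace probe of the Landlock version and errata queries.
 */
#define _GNU_SOURCE
#include <linux/landlock.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef LANDLOCK_CREATE_RULESET_ERRATA
#define LANDLOCK_CREATE_RULESET_ERRATA	(1U << 1)	/* may be missing in old UAPI headers */
#endif

int main(void)
{
	/* Both queries use a NULL attribute pointer and a zero size. */
	long version = syscall(__NR_landlock_create_ruleset, NULL, 0,
			       LANDLOCK_CREATE_RULESET_VERSION);
	long errata = syscall(__NR_landlock_create_ruleset, NULL, 0,
			      LANDLOCK_CREATE_RULESET_ERRATA);

	if (version < 0)
		perror("landlock_create_ruleset(VERSION)");
	else
		printf("Landlock ABI version: %ld\n", version);

	if (errata < 0)
		perror("landlock_create_ruleset(ERRATA)");
	else
		printf("Landlock errata bitmask: 0x%lx\n", errata);

	return 0;
}
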
diff --git a/tools/testing/selftests/mount_setattr/mount_setattr_test.c b/tools/testing/selftests/mount_setattr/mount_setattr_test.c index c6a8c732b802..304e6422a1f1 100644 --- a/tools/testing/selftests/mount_setattr/mount_setattr_test.c +++ b/tools/testing/selftests/mount_setattr/mount_setattr_test.c @@ -1026,7 +1026,7 @@ FIXTURE_SETUP(mount_setattr_idmapped) "size=100000,mode=700"), 0); ASSERT_EQ(mount("testing", "/mnt", "tmpfs", MS_NOATIME | MS_NODEV, - "size=100000,mode=700"), 0); + "size=2m,mode=700"), 0); ASSERT_EQ(mkdir("/mnt/A", 0777), 0); diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 48d1a68be1d5..080860a8826b 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -72,6 +72,7 @@ TEST_GEN_PROGS += sk_bind_sendto_listen TEST_GEN_PROGS += sk_connect_zero_addr TEST_PROGS += test_ingress_egress_chaining.sh TEST_GEN_FILES += nat6to4.o +TEST_PROGS += ipv6_route_update_soft_lockup.sh TEST_FILES := settings diff --git a/tools/testing/selftests/net/gro.sh b/tools/testing/selftests/net/gro.sh index 342ad27f631b..e771f5f7faa2 100755 --- a/tools/testing/selftests/net/gro.sh +++ b/tools/testing/selftests/net/gro.sh @@ -95,5 +95,6 @@ trap cleanup EXIT if [[ "${test}" == "all" ]]; then run_all_tests else - run_test "${proto}" "${test}" + exit_code=$(run_test "${proto}" "${test}") + exit $exit_code fi; diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c index be4a30a0d02a..9b44a091802c 100644 --- a/tools/testing/selftests/net/ipsec.c +++ b/tools/testing/selftests/net/ipsec.c @@ -227,7 +227,8 @@ static int rtattr_pack(struct nlmsghdr *nh, size_t req_sz, attr->rta_len = RTA_LENGTH(size); attr->rta_type = rta_type; - memcpy(RTA_DATA(attr), payload, size); + if (payload) + memcpy(RTA_DATA(attr), payload, size); return 0; } diff --git a/tools/testing/selftests/net/ipv6_route_update_soft_lockup.sh b/tools/testing/selftests/net/ipv6_route_update_soft_lockup.sh new file mode 100644 index 000000000000..a6b2b1f9c641 --- /dev/null +++ b/tools/testing/selftests/net/ipv6_route_update_soft_lockup.sh @@ -0,0 +1,262 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Testing for potential kernel soft lockup during IPv6 routing table +# refresh under heavy outgoing IPv6 traffic. If a kernel soft lockup +# occurs, a kernel panic will be triggered to prevent associated issues. +# +# +# Test Environment Layout +# +# ┌----------------┐ ┌----------------┐ +# | SOURCE_NS | | SINK_NS | +# | NAMESPACE | | NAMESPACE | +# |(iperf3 clients)| |(iperf3 servers)| +# | | | | +# | | | | +# | ┌-----------| nexthops |---------┐ | +# | |veth_source|<--------------------------------------->|veth_sink|<┐ | +# | └-----------|2001:0DB8:1::0:1/96 2001:0DB8:1::1:1/96 |---------┘ | | +# | | ^ 2001:0DB8:1::1:2/96 | | | +# | | . . | fwd | | +# | ┌---------┐ | . . | | | +# | | IPv6 | | . . | V | +# | | routing | | . 2001:0DB8:1::1:80/96| ┌-----┐ | +# | | table | | . | | lo | | +# | | nexthop | | . └--------┴-----┴-┘ +# | | update | | ............................> 2001:0DB8:2::1:1/128 +# | └-------- ┘ | +# └----------------┘ +# +# The test script sets up two network namespaces, source_ns and sink_ns, +# connected via a veth link. Within source_ns, it continuously updates the +# IPv6 routing table by flushing and inserting IPV6_NEXTHOP_ADDR_COUNT nexthop +# IPs destined for SINK_LOOPBACK_IP_ADDR in sink_ns. This refresh occurs at a +# rate of 1/ROUTING_TABLE_REFRESH_PERIOD per second for TEST_DURATION seconds. 
+# +# Simultaneously, multiple iperf3 clients within source_ns generate heavy +# outgoing IPv6 traffic. Each client is assigned a unique port number starting +# at 5000 and incrementing sequentially. Each client targets a unique iperf3 +# server running in sink_ns, connected to the SINK_LOOPBACK_IFACE interface +# using the same port number. +# +# The number of iperf3 servers and clients is set to half of the total +# available cores on each machine. +# +# NOTE: We have tested this script on machines with various CPU specifications, +# ranging from lower to higher performance as listed below. The test script +# effectively triggered a kernel soft lockup on machines running an unpatched +# kernel in under a minute: +# +# - 1x Intel Xeon E-2278G 8-Core Processor @ 3.40GHz +# - 1x Intel Xeon E-2378G Processor 8-Core @ 2.80GHz +# - 1x AMD EPYC 7401P 24-Core Processor @ 2.00GHz +# - 1x AMD EPYC 7402P 24-Core Processor @ 2.80GHz +# - 2x Intel Xeon Gold 5120 14-Core Processor @ 2.20GHz +# - 1x Ampere Altra Q80-30 80-Core Processor @ 3.00GHz +# - 2x Intel Xeon Gold 5120 14-Core Processor @ 2.20GHz +# - 2x Intel Xeon Silver 4214 24-Core Processor @ 2.20GHz +# - 1x AMD EPYC 7502P 32-Core @ 2.50GHz +# - 1x Intel Xeon Gold 6314U 32-Core Processor @ 2.30GHz +# - 2x Intel Xeon Gold 6338 32-Core Processor @ 2.00GHz +# +# On less performant machines, you may need to increase the TEST_DURATION +# parameter to enhance the likelihood of encountering a race condition leading +# to a kernel soft lockup and avoid a false negative result. +# +# NOTE: The test may not produce the expected result in virtualized +# environments (e.g., qemu) due to differences in timing and CPU handling, +# which can affect the conditions needed to trigger a soft lockup. + +source lib.sh +source net_helper.sh + +TEST_DURATION=300 +ROUTING_TABLE_REFRESH_PERIOD=0.01 + +IPERF3_BITRATE="300m" + + +IPV6_NEXTHOP_ADDR_COUNT="128" +IPV6_NEXTHOP_ADDR_MASK="96" +IPV6_NEXTHOP_PREFIX="2001:0DB8:1" + + +SOURCE_TEST_IFACE="veth_source" +SOURCE_TEST_IP_ADDR="2001:0DB8:1::0:1/96" + +SINK_TEST_IFACE="veth_sink" +# ${SINK_TEST_IFACE} is populated with the following range of IPv6 addresses: +# 2001:0DB8:1::1:1 to 2001:0DB8:1::1:${IPV6_NEXTHOP_ADDR_COUNT} +SINK_LOOPBACK_IFACE="lo" +SINK_LOOPBACK_IP_MASK="128" +SINK_LOOPBACK_IP_ADDR="2001:0DB8:2::1:1" + +nexthop_ip_list="" +termination_signal="" +kernel_softlokup_panic_prev_val="" + +terminate_ns_processes_by_pattern() { + local ns=$1 + local pattern=$2 + + for pid in $(ip netns pids ${ns}); do + [ -e /proc/$pid/cmdline ] && grep -qe "${pattern}" /proc/$pid/cmdline && kill -9 $pid + done +} + +cleanup() { + echo "info: cleaning up namespaces and terminating all processes within them..." + + + # Terminate iperf3 instances running in the source_ns. To avoid race + # conditions, first iterate over the PIDs and terminate those + # associated with the bash shells running the + # `while true; do iperf3 -c ...; done` loops. In a second iteration, + # terminate the individual `iperf3 -c ...` instances. + terminate_ns_processes_by_pattern ${source_ns} while + terminate_ns_processes_by_pattern ${source_ns} iperf3 + + # Repeat the same process for sink_ns + terminate_ns_processes_by_pattern ${sink_ns} while + terminate_ns_processes_by_pattern ${sink_ns} iperf3 + + # Check if any iperf3 instances are still running. 
This could happen + # if a core has entered an infinite loop and the timeout for detecting + # the soft lockup has not expired, but either the test interval has + # already elapsed or the test was terminated manually (e.g., with ^C) + for pid in $(ip netns pids ${source_ns}); do + if [ -e /proc/$pid/cmdline ] && grep -qe 'iperf3' /proc/$pid/cmdline; then + echo "FAIL: unable to terminate some iperf3 instances. Soft lockup is underway. A kernel panic is on the way!" + exit ${ksft_fail} + fi + done + + if [ "$termination_signal" == "SIGINT" ]; then + echo "SKIP: Termination due to ^C (SIGINT)" + elif [ "$termination_signal" == "SIGALRM" ]; then + echo "PASS: No kernel soft lockup occurred during this ${TEST_DURATION} second test" + fi + + cleanup_ns ${source_ns} ${sink_ns} + + sysctl -qw kernel.softlockup_panic=${kernel_softlokup_panic_prev_val} +} + +setup_prepare() { + setup_ns source_ns sink_ns + + ip -n ${source_ns} link add name ${SOURCE_TEST_IFACE} type veth peer name ${SINK_TEST_IFACE} netns ${sink_ns} + + # Setting up the Source namespace + ip -n ${source_ns} addr add ${SOURCE_TEST_IP_ADDR} dev ${SOURCE_TEST_IFACE} + ip -n ${source_ns} link set dev ${SOURCE_TEST_IFACE} qlen 10000 + ip -n ${source_ns} link set dev ${SOURCE_TEST_IFACE} up + ip netns exec ${source_ns} sysctl -qw net.ipv6.fib_multipath_hash_policy=1 + + # Setting up the Sink namespace + ip -n ${sink_ns} addr add ${SINK_LOOPBACK_IP_ADDR}/${SINK_LOOPBACK_IP_MASK} dev ${SINK_LOOPBACK_IFACE} + ip -n ${sink_ns} link set dev ${SINK_LOOPBACK_IFACE} up + ip netns exec ${sink_ns} sysctl -qw net.ipv6.conf.${SINK_LOOPBACK_IFACE}.forwarding=1 + + ip -n ${sink_ns} link set ${SINK_TEST_IFACE} up + ip netns exec ${sink_ns} sysctl -qw net.ipv6.conf.${SINK_TEST_IFACE}.forwarding=1 + + + # Populate nexthop IPv6 addresses on the test interface in the sink_ns + echo "info: populating ${IPV6_NEXTHOP_ADDR_COUNT} IPv6 addresses on the ${SINK_TEST_IFACE} interface ..." + for IP in $(seq 1 ${IPV6_NEXTHOP_ADDR_COUNT}); do + ip -n ${sink_ns} addr add ${IPV6_NEXTHOP_PREFIX}::$(printf "1:%x" "${IP}")/${IPV6_NEXTHOP_ADDR_MASK} dev ${SINK_TEST_IFACE}; + done + + # Preparing list of nexthops + for IP in $(seq 1 ${IPV6_NEXTHOP_ADDR_COUNT}); do + nexthop_ip_list=$nexthop_ip_list" nexthop via ${IPV6_NEXTHOP_PREFIX}::$(printf "1:%x" $IP) dev ${SOURCE_TEST_IFACE} weight 1" + done +} + + +test_soft_lockup_during_routing_table_refresh() { + # Start num_of_iperf_servers iperf3 servers in the sink_ns namespace, + # each listening on ports starting at 5001 and incrementing + # sequentially. Since iperf3 instances may terminate unexpectedly, a + # while loop is used to automatically restart them in such cases. + echo "info: starting ${num_of_iperf_servers} iperf3 servers in the sink_ns namespace ..." 
+ for i in $(seq 1 ${num_of_iperf_servers}); do + cmd="iperf3 --bind ${SINK_LOOPBACK_IP_ADDR} -s -p $(printf '5%03d' ${i}) --rcv-timeout 200 &>/dev/null" + ip netns exec ${sink_ns} bash -c "while true; do ${cmd}; done &" &>/dev/null + done + + # Wait for the iperf3 servers to be ready + for i in $(seq ${num_of_iperf_servers}); do + port=$(printf '5%03d' ${i}); + wait_local_port_listen ${sink_ns} ${port} tcp + done + + # Continuously refresh the routing table in the background within + # the source_ns namespace + ip netns exec ${source_ns} bash -c " + while \$(ip netns list | grep -q ${source_ns}); do + ip -6 route add ${SINK_LOOPBACK_IP_ADDR}/${SINK_LOOPBACK_IP_MASK} ${nexthop_ip_list}; + sleep ${ROUTING_TABLE_REFRESH_PERIOD}; + ip -6 route delete ${SINK_LOOPBACK_IP_ADDR}/${SINK_LOOPBACK_IP_MASK}; + done &" + + # Start num_of_iperf_servers iperf3 clients in the source_ns namespace, + # each sending TCP traffic on sequential ports starting at 5001. + # Since iperf3 instances may terminate unexpectedly (e.g., if the route + # to the server is deleted in the background during a route refresh), a + # while loop is used to automatically restart them in such cases. + echo "info: starting ${num_of_iperf_servers} iperf3 clients in the source_ns namespace ..." + for i in $(seq 1 ${num_of_iperf_servers}); do + cmd="iperf3 -c ${SINK_LOOPBACK_IP_ADDR} -p $(printf '5%03d' ${i}) --length 64 --bitrate ${IPERF3_BITRATE} -t 0 --connect-timeout 150 &>/dev/null" + ip netns exec ${source_ns} bash -c "while true; do ${cmd}; done &" &>/dev/null + done + + echo "info: IPv6 routing table is being updated at the rate of $(echo "1/${ROUTING_TABLE_REFRESH_PERIOD}" | bc)/s for ${TEST_DURATION} seconds ..." + echo "info: A kernel soft lockup, if detected, results in a kernel panic!" + + wait +} + +# Make sure 'iperf3' is installed, skip the test otherwise +if [ ! -x "$(command -v "iperf3")" ]; then + echo "SKIP: 'iperf3' is not installed. Skipping the test." + exit ${ksft_skip} +fi + +# Determine the number of cores on the machine +num_of_iperf_servers=$(( $(nproc)/2 )) + +# Check if we are running on a multi-core machine, skip the test otherwise +if [ "${num_of_iperf_servers}" -eq 0 ]; then + echo "SKIP: This test is not valid on a single core machine!" 
+ exit ${ksft_skip} +fi + +# Since the kernel soft lockup we're testing causes at least one core to enter +# an infinite loop, destabilizing the host and likely affecting subsequent +# tests, we trigger a kernel panic instead of reporting a failure and +# continuing +kernel_softlokup_panic_prev_val=$(sysctl -n kernel.softlockup_panic) +sysctl -qw kernel.softlockup_panic=1 + +handle_sigint() { + termination_signal="SIGINT" + cleanup + exit ${ksft_skip} +} + +handle_sigalrm() { + termination_signal="SIGALRM" + cleanup + exit ${ksft_pass} +} + +trap handle_sigint SIGINT +trap handle_sigalrm SIGALRM + +(sleep ${TEST_DURATION} && kill -s SIGALRM $$)& + +setup_prepare +test_soft_lockup_during_routing_table_refresh diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c index b6b9f41dbc29..a842f5ade6e3 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_connect.c +++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c @@ -25,6 +25,8 @@ #include #include +#include + #include #include @@ -1131,23 +1133,42 @@ static void parse_setsock_options(const char *name) exit(1); } -void xdisconnect(int fd, int addrlen) +void xdisconnect(int fd) { - struct sockaddr_storage empty; + socklen_t addrlen = sizeof(struct sockaddr_storage); + struct sockaddr_storage addr, empty; int msec_sleep = 10; - int queued = 1; - int i; + void *raw_addr; + int i, cmdlen; + char cmd[128]; + + /* get the local address and convert it to string */ + if (getsockname(fd, (struct sockaddr *)&addr, &addrlen) < 0) + xerror("getsockname"); + + if (addr.ss_family == AF_INET) + raw_addr = &(((struct sockaddr_in *)&addr)->sin_addr); + else if (addr.ss_family == AF_INET6) + raw_addr = &(((struct sockaddr_in6 *)&addr)->sin6_addr); + else + xerror("bad family"); + + strcpy(cmd, "ss -M | grep -q "); + cmdlen = strlen(cmd); + if (!inet_ntop(addr.ss_family, raw_addr, &cmd[cmdlen], + sizeof(cmd) - cmdlen)) + xerror("inet_ntop"); shutdown(fd, SHUT_WR); - /* while until the pending data is completely flushed, the later + /* + * wait until the pending data is completely flushed and all + * the MPTCP sockets reached the closed status. * disconnect will bypass/ignore/drop any pending data. */ for (i = 0; ; i += msec_sleep) { - if (ioctl(fd, SIOCOUTQ, &queued) < 0) - xerror("can't query out socket queue: %d", errno); - - if (!queued) + /* closed socket are not listed by 'ss' */ + if (system(cmd) != 0) break; if (i > poll_timeout) @@ -1192,12 +1213,12 @@ again: /* close the client socket open only if we are not going to reconnect */ ret = copyfd_io(fd_in, fd, 1, 0); if (ret) - return ret; + goto out; if (cfg_truncate > 0) { - xdisconnect(fd, peer->ai_addrlen); + shutdown(fd, SHUT_WR); } else if (--cfg_repeat > 0) { - xdisconnect(fd, peer->ai_addrlen); + xdisconnect(fd); /* the socket could be unblocking at this point, we need the * connect to be blocking @@ -1212,7 +1233,10 @@ again: close(fd); } - return 0; +out: + if (cfg_input) + close(fd_in); + return ret; } int parse_proto(const char *proto) diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index 00ab4c6e4044..33f4fb34ac9b 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -197,6 +197,12 @@ # # - pmtu_ipv6_route_change # Same as above but with IPv6 +# +# - pmtu_ipv4_mp_exceptions +# Use the same topology as in pmtu_ipv4, but add routeable addresses +# on host A and B on lo reachable via both routers. 
Host A and B +# addresses have multipath routes to each other, b_r1 mtu = 1500. +# Check that PMTU exceptions are created for both paths. # Kselftest framework requirement - SKIP code is 4. ksft_skip=4 @@ -266,7 +272,8 @@ tests=" list_flush_ipv4_exception ipv4: list and flush cached exceptions 1 list_flush_ipv6_exception ipv6: list and flush cached exceptions 1 pmtu_ipv4_route_change ipv4: PMTU exception w/route replace 1 - pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1" + pmtu_ipv6_route_change ipv6: PMTU exception w/route replace 1 + pmtu_ipv4_mp_exceptions ipv4: PMTU multipath nh exceptions 1" NS_A="ns-A" NS_B="ns-B" @@ -353,6 +360,9 @@ tunnel6_a_addr="fd00:2::a" tunnel6_b_addr="fd00:2::b" tunnel6_mask="64" +host4_a_addr="192.168.99.99" +host4_b_addr="192.168.88.88" + dummy6_0_prefix="fc00:1000::" dummy6_1_prefix="fc00:1001::" dummy6_mask="64" @@ -907,6 +917,52 @@ setup_ovs_bridge() { run_cmd ip route add ${prefix6}:${b_r1}::1 via ${prefix6}:${a_r1}::2 } +setup_multipath_new() { + # Set up host A with multipath routes to host B host4_b_addr + run_cmd ${ns_a} ip addr add ${host4_a_addr} dev lo + run_cmd ${ns_a} ip nexthop add id 401 via ${prefix4}.${a_r1}.2 dev veth_A-R1 + run_cmd ${ns_a} ip nexthop add id 402 via ${prefix4}.${a_r2}.2 dev veth_A-R2 + run_cmd ${ns_a} ip nexthop add id 403 group 401/402 + run_cmd ${ns_a} ip route add ${host4_b_addr} src ${host4_a_addr} nhid 403 + + # Set up host B with multipath routes to host A host4_a_addr + run_cmd ${ns_b} ip addr add ${host4_b_addr} dev lo + run_cmd ${ns_b} ip nexthop add id 401 via ${prefix4}.${b_r1}.2 dev veth_B-R1 + run_cmd ${ns_b} ip nexthop add id 402 via ${prefix4}.${b_r2}.2 dev veth_B-R2 + run_cmd ${ns_b} ip nexthop add id 403 group 401/402 + run_cmd ${ns_b} ip route add ${host4_a_addr} src ${host4_b_addr} nhid 403 +} + +setup_multipath_old() { + # Set up host A with multipath routes to host B host4_b_addr + run_cmd ${ns_a} ip addr add ${host4_a_addr} dev lo + run_cmd ${ns_a} ip route add ${host4_b_addr} \ + src ${host4_a_addr} \ + nexthop via ${prefix4}.${a_r1}.2 weight 1 \ + nexthop via ${prefix4}.${a_r2}.2 weight 1 + + # Set up host B with multipath routes to host A host4_a_addr + run_cmd ${ns_b} ip addr add ${host4_b_addr} dev lo + run_cmd ${ns_b} ip route add ${host4_a_addr} \ + src ${host4_b_addr} \ + nexthop via ${prefix4}.${b_r1}.2 weight 1 \ + nexthop via ${prefix4}.${b_r2}.2 weight 1 +} + +setup_multipath() { + if [ "$USE_NH" = "yes" ]; then + setup_multipath_new + else + setup_multipath_old + fi + + # Set up routers with routes to dummies + run_cmd ${ns_r1} ip route add ${host4_a_addr} via ${prefix4}.${a_r1}.1 + run_cmd ${ns_r2} ip route add ${host4_a_addr} via ${prefix4}.${a_r2}.1 + run_cmd ${ns_r1} ip route add ${host4_b_addr} via ${prefix4}.${b_r1}.1 + run_cmd ${ns_r2} ip route add ${host4_b_addr} via ${prefix4}.${b_r2}.1 +} + setup() { [ "$(id -u)" -ne 0 ] && echo " need to run as root" && return $ksft_skip @@ -988,23 +1044,15 @@ link_get_mtu() { } route_get_dst_exception() { - ns_cmd="${1}" - dst="${2}" - dsfield="${3}" + ns_cmd="${1}"; shift - if [ -z "${dsfield}" ]; then - dsfield=0 - fi - - ${ns_cmd} ip route get "${dst}" dsfield "${dsfield}" + ${ns_cmd} ip route get "$@" } route_get_dst_pmtu_from_exception() { - ns_cmd="${1}" - dst="${2}" - dsfield="${3}" + ns_cmd="${1}"; shift - mtu_parse "$(route_get_dst_exception "${ns_cmd}" "${dst}" "${dsfield}")" + mtu_parse "$(route_get_dst_exception "${ns_cmd}" "$@")" } check_pmtu_value() { @@ -1147,10 +1195,10 @@ test_pmtu_ipv4_dscp_icmp_exception() { 
run_cmd "${ns_a}" ping -q -M want -Q "${dsfield}" -c 1 -w 1 -s "${len}" "${dst2}" # Check that exceptions have been created with the correct PMTU - pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" "${policy_mark}")" + pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" dsfield "${policy_mark}")" check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1 - pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" "${policy_mark}")" + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" dsfield "${policy_mark}")" check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1 } @@ -1197,9 +1245,9 @@ test_pmtu_ipv4_dscp_udp_exception() { UDP:"${dst2}":50000,tos="${dsfield}" # Check that exceptions have been created with the correct PMTU - pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" "${policy_mark}")" + pmtu_1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst1}" dsfield "${policy_mark}")" check_pmtu_value "1400" "${pmtu_1}" "exceeding MTU" || return 1 - pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" "${policy_mark}")" + pmtu_2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${dst2}" dsfield "${policy_mark}")" check_pmtu_value "1500" "${pmtu_2}" "exceeding MTU" || return 1 } @@ -1932,7 +1980,7 @@ check_running() { pid=${1} cmd=${2} - [ "$(cat /proc/${pid}/cmdline 2>/dev/null | tr -d '\0')" = "{cmd}" ] + [ "$(cat /proc/${pid}/cmdline 2>/dev/null | tr -d '\0')" = "${cmd}" ] } test_cleanup_vxlanX_exception() { @@ -2205,6 +2253,36 @@ test_pmtu_ipv6_route_change() { test_pmtu_ipvX_route_change 6 } +test_pmtu_ipv4_mp_exceptions() { + setup namespaces routing multipath || return $ksft_skip + + trace "${ns_a}" veth_A-R1 "${ns_r1}" veth_R1-A \ + "${ns_r1}" veth_R1-B "${ns_b}" veth_B-R1 \ + "${ns_a}" veth_A-R2 "${ns_r2}" veth_R2-A \ + "${ns_r2}" veth_R2-B "${ns_b}" veth_B-R2 + + # Set up initial MTU values + mtu "${ns_a}" veth_A-R1 2000 + mtu "${ns_r1}" veth_R1-A 2000 + mtu "${ns_r1}" veth_R1-B 1500 + mtu "${ns_b}" veth_B-R1 1500 + + mtu "${ns_a}" veth_A-R2 2000 + mtu "${ns_r2}" veth_R2-A 2000 + mtu "${ns_r2}" veth_R2-B 1500 + mtu "${ns_b}" veth_B-R2 1500 + + # Ping and expect two nexthop exceptions for two routes + run_cmd ${ns_a} ping -q -M want -i 0.1 -c 1 -s 1800 "${host4_b_addr}" + + # Check that exceptions have been created with the correct PMTU + pmtu_a_R1="$(route_get_dst_pmtu_from_exception "${ns_a}" "${host4_b_addr}" oif veth_A-R1)" + pmtu_a_R2="$(route_get_dst_pmtu_from_exception "${ns_a}" "${host4_b_addr}" oif veth_A-R2)" + + check_pmtu_value "1500" "${pmtu_a_R1}" "exceeding MTU (veth_A-R1)" || return 1 + check_pmtu_value "1500" "${pmtu_a_R2}" "exceeding MTU (veth_A-R2)" || return 1 +} + usage() { echo echo "$0 [OPTIONS] [TEST]..." 
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh index cafd14b1ed2a..ff1242f2afaa 100755 --- a/tools/testing/selftests/net/rtnetlink.sh +++ b/tools/testing/selftests/net/rtnetlink.sh @@ -813,10 +813,10 @@ kci_test_ipsec_offload() # does driver have correct offload info diff $sysfsf - << EOF SA count=2 tx=3 -sa[0] tx ipaddr=0x00000000 00000000 00000000 00000000 +sa[0] tx ipaddr=$dstip sa[0] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1 sa[0] key=0x34333231 38373635 32313039 36353433 -sa[1] rx ipaddr=0x00000000 00000000 00000000 037ba8c0 +sa[1] rx ipaddr=$srcip sa[1] spi=0x00000009 proto=0x32 salt=0x61626364 crypt=1 sa[1] key=0x34333231 38373635 32313039 36353433 EOF diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c index b02080d09fbc..d0fba50bd6ef 100644 --- a/tools/testing/selftests/net/udpgso.c +++ b/tools/testing/selftests/net/udpgso.c @@ -94,6 +94,19 @@ struct testcase testcases_v4[] = { .gso_len = CONST_MSS_V4, .r_num_mss = 1, }, + { + /* datalen <= MSS < gso_len: will fall back to no GSO */ + .tlen = CONST_MSS_V4, + .gso_len = CONST_MSS_V4 + 1, + .r_num_mss = 0, + .r_len_last = CONST_MSS_V4, + }, + { + /* MSS < datalen < gso_len: fail */ + .tlen = CONST_MSS_V4 + 1, + .gso_len = CONST_MSS_V4 + 2, + .tfail = true, + }, { /* send a single MSS + 1B */ .tlen = CONST_MSS_V4 + 1, @@ -197,6 +210,19 @@ struct testcase testcases_v6[] = { .gso_len = CONST_MSS_V6, .r_num_mss = 1, }, + { + /* datalen <= MSS < gso_len: will fall back to no GSO */ + .tlen = CONST_MSS_V6, + .gso_len = CONST_MSS_V6 + 1, + .r_num_mss = 0, + .r_len_last = CONST_MSS_V6, + }, + { + /* MSS < datalen < gso_len: fail */ + .tlen = CONST_MSS_V6 + 1, + .gso_len = CONST_MSS_V6 + 2, + .tfail = true + }, { /* send a single MSS + 1B */ .tlen = CONST_MSS_V6 + 1, diff --git a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c index 580fcac0a09f..b71ef8a493ed 100644 --- a/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c +++ b/tools/testing/selftests/powerpc/benchmarks/gettimeofday.c @@ -20,7 +20,7 @@ static int test_gettimeofday(void) gettimeofday(&tv_end, NULL); } - timersub(&tv_start, &tv_end, &tv_diff); + timersub(&tv_end, &tv_start, &tv_diff); printf("time = %.6f\n", tv_diff.tv_sec + (tv_diff.tv_usec) * 1e-6); diff --git a/tools/testing/selftests/resctrl/resctrl_val.c b/tools/testing/selftests/resctrl/resctrl_val.c index 00864242d76c..d2ee8ac8dd8e 100644 --- a/tools/testing/selftests/resctrl/resctrl_val.c +++ b/tools/testing/selftests/resctrl/resctrl_val.c @@ -102,13 +102,12 @@ void get_event_and_umask(char *cas_count_cfg, int count, bool op) char *token[MAX_TOKENS]; int i = 0; - strcat(cas_count_cfg, ","); token[0] = strtok(cas_count_cfg, "=,"); for (i = 1; i < MAX_TOKENS; i++) token[i] = strtok(NULL, "=,"); - for (i = 0; i < MAX_TOKENS; i++) { + for (i = 0; i < MAX_TOKENS - 1; i++) { if (!token[i]) break; if (strcmp(token[i], "event") == 0) { diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json b/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json index 58189327f644..383fbda07245 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/flow.json @@ -78,10 +78,10 @@ "setup": [ "$TC qdisc add dev $DEV1 ingress" ], - "cmdUnderTest": "$TC filter add dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key dst rshift 0xff", + "cmdUnderTest": "$TC filter add 
dev $DEV1 parent ffff: handle 1 prio 1 protocol ip flow map key dst rshift 0x1f", "expExitCode": "0", "verifyCmd": "$TC filter get dev $DEV1 parent ffff: handle 1 protocol ip prio 1 flow", - "matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst rshift 255 baseclass", + "matchPattern": "filter parent ffff: protocol ip pref 1 flow chain [0-9]+ handle 0x1 map keys dst rshift 31 baseclass", "matchCount": "1", "teardown": [ "$TC qdisc del dev $DEV1 ingress" diff --git a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json index 08d4861c2e78..d04fed83332c 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json +++ b/tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json @@ -132,6 +132,28 @@ "echo \"1\" > /sys/bus/netdevsim/del_device" ] }, + { + "id": "831f", + "name": "Add taprio Qdisc with too short cycle-time", + "category": [ + "qdisc", + "taprio" + ], + "plugins": { + "requires": "nsPlugin" + }, + "setup": [ + "echo \"1 1 8\" > /sys/bus/netdevsim/new_device" + ], + "cmdUnderTest": "$TC qdisc add dev $ETH root handle 1: taprio num_tc 2 queues 1@0 1@1 sched-entry S 01 200000 sched-entry S 02 200000 cycle-time 100 clockid CLOCK_TAI", + "expExitCode": "2", + "verifyCmd": "$TC qdisc show dev $ETH", + "matchPattern": "qdisc taprio 1: root refcnt", + "matchCount": "0", + "teardown": [ + "echo \"1\" > /sys/bus/netdevsim/del_device" + ] + }, { "id": "3e1e", "name": "Add taprio Qdisc with an invalid cycle-time", diff --git a/tools/testing/selftests/timers/clocksource-switch.c b/tools/testing/selftests/timers/clocksource-switch.c index c5264594064c..83faa4e354e3 100644 --- a/tools/testing/selftests/timers/clocksource-switch.c +++ b/tools/testing/selftests/timers/clocksource-switch.c @@ -156,8 +156,8 @@ int main(int argc, char **argv) /* Check everything is sane before we start switching asynchronously */ if (do_sanity_check) { for (i = 0; i < count; i++) { - printf("Validating clocksource %s\n", - clocksource_list[i]); + ksft_print_msg("Validating clocksource %s\n", + clocksource_list[i]); if (change_clocksource(clocksource_list[i])) { status = -1; goto out; @@ -169,7 +169,7 @@ int main(int argc, char **argv) } } - printf("Running Asynchronous Switching Tests...\n"); + ksft_print_msg("Running Asynchronous Switching Tests...\n"); pid = fork(); if (!pid) return run_tests(runtime); diff --git a/tools/testing/selftests/ublk/test_stripe_04.sh b/tools/testing/selftests/ublk/test_stripe_04.sh new file mode 100755 index 000000000000..1f2b642381d1 --- /dev/null +++ b/tools/testing/selftests/ublk/test_stripe_04.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +. "$(cd "$(dirname "$0")" && pwd)"/test_common.sh + +TID="stripe_04" +ERR_CODE=0 + +_prep_test "stripe" "mkfs & mount & umount on zero copy" + +backfile_0=$(_create_backfile 256M) +backfile_1=$(_create_backfile 256M) +dev_id=$(_add_ublk_dev -t stripe -z -q 2 "$backfile_0" "$backfile_1") +_check_add_dev $TID $? "$backfile_0" "$backfile_1" + +_mkfs_mount_test /dev/ublkb"${dev_id}" +ERR_CODE=$? 
+ +_cleanup_test "stripe" + +_remove_backfile "$backfile_0" +_remove_backfile "$backfile_1" + +_show_result $TID $ERR_CODE diff --git a/tools/testing/selftests/vDSO/parse_vdso.c b/tools/testing/selftests/vDSO/parse_vdso.c index 7dd5668ea8a6..28f35620c499 100644 --- a/tools/testing/selftests/vDSO/parse_vdso.c +++ b/tools/testing/selftests/vDSO/parse_vdso.c @@ -222,8 +222,7 @@ void *vdso_sym(const char *version, const char *name) ELF(Sym) *sym = &vdso_info.symtab[chain]; /* Check for a defined global or weak function w/ right name. */ - if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC && - ELF64_ST_TYPE(sym->st_info) != STT_NOTYPE) + if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC) continue; if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL && ELF64_ST_BIND(sym->st_info) != STB_WEAK) diff --git a/tools/testing/selftests/vm/charge_reserved_hugetlb.sh b/tools/testing/selftests/vm/charge_reserved_hugetlb.sh index 8e00276b4e69..dc3fc438b3d9 100644 --- a/tools/testing/selftests/vm/charge_reserved_hugetlb.sh +++ b/tools/testing/selftests/vm/charge_reserved_hugetlb.sh @@ -27,7 +27,7 @@ fi if [[ $cgroup2 ]]; then cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}') if [[ -z "$cgroup_path" ]]; then - cgroup_path=/dev/cgroup/memory + cgroup_path=$(mktemp -d) mount -t cgroup2 none $cgroup_path do_umount=1 fi @@ -35,7 +35,7 @@ if [[ $cgroup2 ]]; then else cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}') if [[ -z "$cgroup_path" ]]; then - cgroup_path=/dev/cgroup/memory + cgroup_path=$(mktemp -d) mount -t cgroup memory,hugetlb $cgroup_path do_umount=1 fi diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c index 309b3750e57e..38fec412206b 100644 --- a/tools/testing/selftests/vm/compaction_test.c +++ b/tools/testing/selftests/vm/compaction_test.c @@ -89,6 +89,8 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size) int compaction_index = 0; char initial_nr_hugepages[20] = {0}; char nr_hugepages[20] = {0}; + char target_nr_hugepages[24] = {0}; + int slen; /* We want to test with 80% of available memory. Else, OOM killer comes in to play */ @@ -119,11 +121,18 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size) lseek(fd, 0, SEEK_SET); - /* Request a large number of huge pages. The Kernel will allocate - as much as it can */ - if (write(fd, "100000", (6*sizeof(char))) != (6*sizeof(char))) { - ksft_print_msg("Failed to write 100000 to /proc/sys/vm/nr_hugepages: %s\n", - strerror(errno)); + /* + * Request huge pages for about half of the free memory. 
The Kernel + * will allocate as much as it can, and we expect it will get at least 1/3 + */ + nr_hugepages_ul = mem_free / hugepage_size / 2; + snprintf(target_nr_hugepages, sizeof(target_nr_hugepages), + "%lu", nr_hugepages_ul); + + slen = strlen(target_nr_hugepages); + if (write(fd, target_nr_hugepages, slen) != slen) { + ksft_print_msg("Failed to write %lu to /proc/sys/vm/nr_hugepages: %s\n", + nr_hugepages_ul, strerror(errno)); goto close_fd; } diff --git a/tools/testing/selftests/vm/hugetlb_reparenting_test.sh b/tools/testing/selftests/vm/hugetlb_reparenting_test.sh index 14d26075c863..302f2c7003f0 100644 --- a/tools/testing/selftests/vm/hugetlb_reparenting_test.sh +++ b/tools/testing/selftests/vm/hugetlb_reparenting_test.sh @@ -22,7 +22,7 @@ fi if [[ $cgroup2 ]]; then CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}') if [[ -z "$CGROUP_ROOT" ]]; then - CGROUP_ROOT=/dev/cgroup/memory + CGROUP_ROOT=$(mktemp -d) mount -t cgroup2 none $CGROUP_ROOT do_umount=1 fi diff --git a/tools/testing/selftests/watchdog/watchdog-test.c b/tools/testing/selftests/watchdog/watchdog-test.c index f45e510500c0..09773695d219 100644 --- a/tools/testing/selftests/watchdog/watchdog-test.c +++ b/tools/testing/selftests/watchdog/watchdog-test.c @@ -242,7 +242,13 @@ int main(int argc, char *argv[]) printf("Watchdog Ticking Away!\n"); + /* + * Register the signals + */ signal(SIGINT, term); + signal(SIGTERM, term); + signal(SIGKILL, term); + signal(SIGQUIT, term); while (1) { keep_alive(); diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh index 405ff262ca93..55500f901fbc 100755 --- a/tools/testing/selftests/wireguard/netns.sh +++ b/tools/testing/selftests/wireguard/netns.sh @@ -332,6 +332,7 @@ waitiface $netns1 vethc waitiface $netns2 veths n0 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward' +[[ -e /proc/sys/net/netfilter/nf_conntrack_udp_timeout ]] || modprobe nf_conntrack n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout' n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout_stream' n0 iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -d 10.0.0.0/24 -j SNAT --to 10.0.0.1 diff --git a/tools/tracing/rtla/src/osnoise.c b/tools/tracing/rtla/src/osnoise.c index b8ec6c15bccb..333195dc63d9 100644 --- a/tools/tracing/rtla/src/osnoise.c +++ b/tools/tracing/rtla/src/osnoise.c @@ -693,7 +693,7 @@ int osnoise_set_tracing_thresh(struct osnoise_context *context, long long tracin retval = osnoise_write_ll_config("tracing_thresh", tracing_thresh); if (retval < 0) - return -1; + return -2; context->tracing_thresh = tracing_thresh; diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c index ed08295bfa12..b7a7dcd68b57 100644 --- a/tools/tracing/rtla/src/timerlat_hist.c +++ b/tools/tracing/rtla/src/timerlat_hist.c @@ -783,9 +783,20 @@ out_err: } static int stop_tracing; +static struct trace_instance *hist_inst = NULL; static void stop_hist(int sig) { + if (stop_tracing) { + /* + * Stop requested twice in a row; abort event processing and + * exit immediately + */ + tracefs_iterate_stop(hist_inst->inst); + return; + } stop_tracing = 1; + if (hist_inst) + trace_instance_stop(hist_inst); } /* @@ -828,6 +839,12 @@ int timerlat_hist_main(int argc, char *argv[]) } trace = &tool->trace; + /* + * Save trace instance into global variable so that SIGINT can stop + * the timerlat tracer. + * Otherwise, rtla could loop indefinitely when overloaded. 
+ */ + hist_inst = trace; retval = enable_timerlat(trace); if (retval) { @@ -894,7 +911,7 @@ int timerlat_hist_main(int argc, char *argv[]) return_value = 0; - if (trace_is_off(&tool->trace, &record->trace)) { + if (trace_is_off(&tool->trace, &record->trace) && !stop_tracing) { printf("rtla timerlat hit stop tracing\n"); if (params->trace_output) { printf(" Saving trace to %s\n", params->trace_output); diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c index 8fc0f6aa19da..46c3405a356f 100644 --- a/tools/tracing/rtla/src/timerlat_top.c +++ b/tools/tracing/rtla/src/timerlat_top.c @@ -575,9 +575,20 @@ out_err: } static int stop_tracing; +static struct trace_instance *top_inst = NULL; static void stop_top(int sig) { + if (stop_tracing) { + /* + * Stop requested twice in a row; abort event processing and + * exit immediately + */ + tracefs_iterate_stop(top_inst->inst); + return; + } stop_tracing = 1; + if (top_inst) + trace_instance_stop(top_inst); } /* @@ -620,6 +631,13 @@ int timerlat_top_main(int argc, char *argv[]) } trace = &top->trace; + /* + * Save trace instance into global variable so that SIGINT can stop + * the timerlat tracer. + * Otherwise, rtla could loop indefinitely when overloaded. + */ + top_inst = trace; + retval = enable_timerlat(trace); if (retval) { @@ -690,7 +708,7 @@ int timerlat_top_main(int argc, char *argv[]) return_value = 0; - if (trace_is_off(&top->trace, &record->trace)) { + if (trace_is_off(&top->trace, &record->trace) && !stop_tracing) { printf("rtla timerlat hit stop tracing\n"); if (params->trace_output) { printf(" Saving trace to %s\n", params->trace_output); diff --git a/tools/tracing/rtla/src/trace.c b/tools/tracing/rtla/src/trace.c index e1ba6d9f4265..93e4032b2397 100644 --- a/tools/tracing/rtla/src/trace.c +++ b/tools/tracing/rtla/src/trace.c @@ -196,6 +196,14 @@ int trace_instance_start(struct trace_instance *trace) return tracefs_trace_on(trace->inst); } +/* + * trace_instance_stop - stop tracing a given rtla instance + */ +int trace_instance_stop(struct trace_instance *trace) +{ + return tracefs_trace_off(trace->inst); +} + /* * trace_events_free - free a list of trace events */ diff --git a/tools/tracing/rtla/src/trace.h b/tools/tracing/rtla/src/trace.h index 2e9a89a25615..551a7cb81f63 100644 --- a/tools/tracing/rtla/src/trace.h +++ b/tools/tracing/rtla/src/trace.h @@ -21,6 +21,7 @@ struct trace_instance { int trace_instance_init(struct trace_instance *trace, char *tool_name); int trace_instance_start(struct trace_instance *trace); +int trace_instance_stop(struct trace_instance *trace); void trace_instance_destroy(struct trace_instance *trace); struct trace_seq *get_trace_seq(void); diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c index 8c8d63c7196c..02194773ef12 100644 --- a/tools/tracing/rtla/src/utils.c +++ b/tools/tracing/rtla/src/utils.c @@ -238,7 +238,7 @@ long parse_ns_duration(char *val) #define SCHED_DEADLINE 6 -static inline int sched_setattr(pid_t pid, const struct sched_attr *attr, +static inline int syscall_sched_setattr(pid_t pid, const struct sched_attr *attr, unsigned int flags) { return syscall(__NR_sched_setattr, pid, attr, flags); } @@ -248,7 +248,7 @@ int __set_sched_attr(int pid, struct sched_attr *attr) int flags = 0; int retval; - retval = sched_setattr(pid, attr, flags); + retval = syscall_sched_setattr(pid, attr, flags); if (retval < 0) { err_msg("Failed to set sched attributes to the pid %d: %s\n", pid, strerror(errno)); diff --git 
a/tools/tracing/rtla/src/utils.h b/tools/tracing/rtla/src/utils.h index 92da41aaf4c4..e445ccf15e70 100644 --- a/tools/tracing/rtla/src/utils.h +++ b/tools/tracing/rtla/src/utils.h @@ -44,6 +44,7 @@ update_sum(unsigned long long *a, unsigned long long *b) *a += *b; } +#ifndef SCHED_ATTR_SIZE_VER0 struct sched_attr { uint32_t size; uint32_t sched_policy; @@ -54,6 +55,7 @@ struct sched_attr { uint64_t sched_deadline; uint64_t sched_period; }; +#endif /* SCHED_ATTR_SIZE_VER0 */ int parse_prio(char *arg, struct sched_attr *sched_param); int set_comm_sched_attr(const char *comm_prefix, struct sched_attr *attr); diff --git a/tools/verification/dot2/automata.py b/tools/verification/dot2/automata.py index baffeb960ff0..bdeb98baa8b0 100644 --- a/tools/verification/dot2/automata.py +++ b/tools/verification/dot2/automata.py @@ -29,11 +29,11 @@ class Automata: def __get_model_name(self): basename = ntpath.basename(self.__dot_path) - if basename.endswith(".dot") == False: + if not basename.endswith(".dot") and not basename.endswith(".gv"): print("not a dot file") raise Exception("not a dot file: %s" % self.__dot_path) - model_name = basename[0:-4] + model_name = ntpath.splitext(basename)[0] if model_name.__len__() == 0: raise Exception("not a dot file: %s" % self.__dot_path) @@ -68,9 +68,9 @@ class Automata: def __get_cursor_begin_events(self): cursor = 0 while self.__dot_lines[cursor].split()[0] != "{node": - cursor += 1 + cursor += 1 while self.__dot_lines[cursor].split()[0] == "{node": - cursor += 1 + cursor += 1 # skip initial state transition cursor += 1 return cursor @@ -94,11 +94,11 @@ class Automata: initial_state = state[7:] else: states.append(state) - if self.__dot_lines[cursor].__contains__("doublecircle") == True: + if "doublecircle" in self.__dot_lines[cursor]: final_states.append(state) has_final_states = True - if self.__dot_lines[cursor].__contains__("ellipse") == True: + if "ellipse" in self.__dot_lines[cursor]: final_states.append(state) has_final_states = True @@ -110,7 +110,7 @@ class Automata: # Insert the initial state at the bein og the states states.insert(0, initial_state) - if has_final_states == False: + if not has_final_states: final_states.append(initial_state) return states, initial_state, final_states @@ -120,7 +120,7 @@ class Automata: cursor = self.__get_cursor_begin_events() events = [] - while self.__dot_lines[cursor][1] == '"': + while self.__dot_lines[cursor].lstrip()[0] == '"': # transitions have the format: # "all_fired" -> "both_fired" [ label = "disable_irq" ]; # ------------ event is here ------------^^^^^ @@ -161,7 +161,7 @@ class Automata: # and we are back! Let's fill the matrix cursor = self.__get_cursor_begin_events() - while self.__dot_lines[cursor][1] == '"': + while self.__dot_lines[cursor].lstrip()[0] == '"': if self.__dot_lines[cursor].split()[1] == "->": line = self.__dot_lines[cursor].split() origin_state = line[0].replace('"','').replace(',','_')
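
The rtla timerlat changes above implement a two-stage shutdown: the first SIGINT asks the tracer to stop gracefully, and a second SIGINT while the first is still being handled aborts event processing outright. Stripped of the tracefs specifics, the pattern looks like the sketch below; stop_collection() and abort_iteration() are placeholders standing in for rtla's trace_instance_stop() and tracefs_iterate_stop() calls, not real library functions.

/*
 * Generic sketch of the two-stage SIGINT stop pattern (assumptions noted above).
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t stop_requested;

static void stop_collection(void)
{
	/* placeholder: e.g. turn tracing off for the instance */
}

static void abort_iteration(void)
{
	/* placeholder: e.g. break out of the event iteration loop */
}

static void sig_handler(int sig)
{
	(void)sig;
	if (stop_requested) {
		/* Stop requested twice in a row: abort right away. */
		abort_iteration();
		return;
	}
	stop_requested = 1;
	stop_collection();
}

int main(void)
{
	signal(SIGINT, sig_handler);

	/* Stand-in for the event-processing loop. */
	while (!stop_requested)
		pause();

	printf("stopped\n");
	return 0;
}
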