diff --git a/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor new file mode 100644 index 00000000000000..d76cd3946434d0 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-spi-devices-spi-nor @@ -0,0 +1,31 @@ +What: /sys/bus/spi/devices/.../spi-nor/jedec_id +Date: April 2021 +KernelVersion: 5.14 +Contact: linux-mtd@lists.infradead.org +Description: (RO) The JEDEC ID of the SPI NOR flash as reported by the + flash device. + + +What: /sys/bus/spi/devices/.../spi-nor/manufacturer +Date: April 2021 +KernelVersion: 5.14 +Contact: linux-mtd@lists.infradead.org +Description: (RO) Manufacturer of the SPI NOR flash. + + +What: /sys/bus/spi/devices/.../spi-nor/partname +Date: April 2021 +KernelVersion: 5.14 +Contact: linux-mtd@lists.infradead.org +Description: (RO) Part name of the SPI NOR flash. + + +What: /sys/bus/spi/devices/.../spi-nor/sfdp +Date: April 2021 +KernelVersion: 5.14 +Contact: linux-mtd@lists.infradead.org +Description: (RO) This attribute is only present if the SPI NOR flash + device supports the "Read SFDP" command (5Ah). + + If present, it contains the complete SFDP (serial flash + discoverable parameters) binary data of the flash. diff --git a/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml b/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml new file mode 100644 index 00000000000000..7a63c85ef8c586 --- /dev/null +++ b/Documentation/devicetree/bindings/memory-controllers/arm,pl353-smc.yaml @@ -0,0 +1,131 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/memory-controllers/arm,pl353-smc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: ARM PL353 Static Memory Controller (SMC) device-tree bindings + +maintainers: + - Miquel Raynal + - Naga Sureshkumar Relli + +description: + The PL353 Static Memory Controller is a bus where you can connect two kinds + of memory interfaces, which are NAND and memory mapped interfaces (such as + SRAM or NOR). + +# We need a select here so we don't match all nodes with 'arm,primecell' +select: + properties: + compatible: + contains: + const: arm,pl353-smc-r2p1 + required: + - compatible + +properties: + $nodename: + pattern: "^memory-controller@[0-9a-f]+$" + + compatible: + items: + - const: arm,pl353-smc-r2p1 + - const: arm,primecell + + "#address-cells": + const: 2 + + "#size-cells": + const: 1 + + reg: + items: + - description: + Configuration registers for the host and sub-controllers. + The three chip select regions are defined in 'ranges'. + + clocks: + items: + - description: clock for the memory device bus + - description: main clock of the SMC + + clock-names: + items: + - const: memclk + - const: apb_pclk + + ranges: + minItems: 1 + maxItems: 3 + description: | + Memory bus areas for interacting with the devices. Reflects + the memory layout with four integer values following: + 0 + items: + - description: NAND bank 0 + - description: NOR/SRAM bank 0 + - description: NOR/SRAM bank 1 + + interrupts: true + +patternProperties: + "@[0-3],[a-f0-9]+$": + type: object + description: | + The child device node represents the controller connected to the SMC + bus. The controller can be a NAND controller or a pair of any memory + mapped controllers such as NOR and SRAM controllers. + + properties: + compatible: + description: + Compatible of memory controller. 
+ + reg: + items: + - items: + - description: | + Chip-select ID, as in the parent range property. + minimum: 0 + maximum: 2 + - description: | + Offset of the memory region requested by the device. + - description: | + Length of the memory region requested by the device. + + required: + - compatible + - reg + +required: + - compatible + - reg + - clock-names + - clocks + - "#address-cells" + - "#size-cells" + - ranges + +additionalProperties: false + +examples: + - | + smcc: memory-controller@e000e000 { + compatible = "arm,pl353-smc-r2p1", "arm,primecell"; + reg = <0xe000e000 0x0001000>; + clock-names = "memclk", "apb_pclk"; + clocks = <&clkc 11>, <&clkc 44>; + ranges = <0x0 0x0 0xe1000000 0x1000000 /* Nand CS region */ + 0x1 0x0 0xe2000000 0x2000000 /* SRAM/NOR CS0 region */ + 0x2 0x0 0xe4000000 0x2000000>; /* SRAM/NOR CS1 region */ + #address-cells = <2>; + #size-cells = <1>; + + nfc0: nand-controller@0,0 { + compatible = "arm,pl353-nand-r2p1"; + reg = <0 0 0x1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; diff --git a/Documentation/devicetree/bindings/memory-controllers/pl353-smc.txt b/Documentation/devicetree/bindings/memory-controllers/pl353-smc.txt deleted file mode 100644 index d56615fd343afd..00000000000000 --- a/Documentation/devicetree/bindings/memory-controllers/pl353-smc.txt +++ /dev/null @@ -1,47 +0,0 @@ -Device tree bindings for ARM PL353 static memory controller - -PL353 static memory controller supports two kinds of memory -interfaces.i.e NAND and SRAM/NOR interfaces. -The actual devices are instantiated from the child nodes of pl353 smc node. - -Required properties: -- compatible : Should be "arm,pl353-smc-r2p1", "arm,primecell". -- reg : Controller registers map and length. -- clock-names : List of input clock names - "memclk", "apb_pclk" - (See clock bindings for details). -- clocks : Clock phandles (see clock bindings for details). -- address-cells : Must be 2. -- size-cells : Must be 1. - -Child nodes: - For NAND the "arm,pl353-nand-r2p1" and for NOR the "cfi-flash" drivers are -supported as child nodes. - -for NAND partition information please refer the below file -Documentation/devicetree/bindings/mtd/partition.txt - -Example: - smcc: memory-controller@e000e000 - compatible = "arm,pl353-smc-r2p1", "arm,primecell"; - clock-names = "memclk", "apb_pclk"; - clocks = <&clkc 11>, <&clkc 44>; - reg = <0xe000e000 0x1000>; - #address-cells = <2>; - #size-cells = <1>; - ranges = <0x0 0x0 0xe1000000 0x1000000 //Nand CS Region - 0x1 0x0 0xe2000000 0x2000000 //SRAM/NOR CS Region - 0x2 0x0 0xe4000000 0x2000000>; //SRAM/NOR CS Region - nand_0: flash@e1000000 { - compatible = "arm,pl353-nand-r2p1" - reg = <0 0 0x1000000>; - (...) 
- }; - nor0: flash@e2000000 { - compatible = "cfi-flash"; - reg = <1 0 0x2000000>; - }; - nor1: flash@e4000000 { - compatible = "cfi-flash"; - reg = <2 0 0x2000000>; - }; - }; diff --git a/Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml b/Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml new file mode 100644 index 00000000000000..5f126bb9b202f2 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mtd/arm,pl353-nand-r2p1.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: PL353 NAND Controller device tree bindings + +allOf: + - $ref: "nand-controller.yaml" + +maintainers: + - Miquel Raynal + - Naga Sureshkumar Relli + +properties: + compatible: + items: + - const: arm,pl353-nand-r2p1 + + reg: + items: + - items: + - description: CS with regard to the parent ranges property + - description: Offset of the memory region requested by the device + - description: Length of the memory region requested by the device + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + smcc: memory-controller@e000e000 { + compatible = "arm,pl353-smc-r2p1", "arm,primecell"; + reg = <0xe000e000 0x0001000>; + clock-names = "memclk", "apb_pclk"; + clocks = <&clkc 11>, <&clkc 44>; + ranges = <0x0 0x0 0xe1000000 0x1000000 /* Nand CS region */ + 0x1 0x0 0xe2000000 0x2000000 /* SRAM/NOR CS0 region */ + 0x2 0x0 0xe4000000 0x2000000>; /* SRAM/NOR CS1 region */ + #address-cells = <2>; + #size-cells = <1>; + + nfc0: nand-controller@0,0 { + compatible = "arm,pl353-nand-r2p1"; + reg = <0 0 0x1000000>; + #address-cells = <1>; + #size-cells = <0>; + }; + }; diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt deleted file mode 100644 index 44335a4f8bfb59..00000000000000 --- a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.txt +++ /dev/null @@ -1,186 +0,0 @@ -* Broadcom STB NAND Controller - -The Broadcom Set-Top Box NAND controller supports low-level access to raw NAND -flash chips. It has a memory-mapped register interface for both control -registers and for its data input/output buffer. On some SoCs, this controller is -paired with a custom DMA engine (inventively named "Flash DMA") which supports -basic PROGRAM and READ functions, among other features. - -This controller was originally designed for STB SoCs (BCM7xxx) but is now -available on a variety of Broadcom SoCs, including some BCM3xxx, BCM63xx, and -iProc/Cygnus. Its history includes several similar (but not fully register -compatible) versions. - -Required properties: -- compatible : May contain an SoC-specific compatibility string (see below) - to account for any SoC-specific hardware bits that may be - added on top of the base core controller. - In addition, must contain compatibility information about - the core NAND controller, of the following form: - "brcm,brcmnand" and an appropriate version compatibility - string, like "brcm,brcmnand-v7.0" - Possible values: - brcm,brcmnand-v2.1 - brcm,brcmnand-v2.2 - brcm,brcmnand-v4.0 - brcm,brcmnand-v5.0 - brcm,brcmnand-v6.0 - brcm,brcmnand-v6.1 - brcm,brcmnand-v6.2 - brcm,brcmnand-v7.0 - brcm,brcmnand-v7.1 - brcm,brcmnand-v7.2 - brcm,brcmnand-v7.3 - brcm,brcmnand -- reg : the register start and length for NAND register region. 
- (optional) Flash DMA register range (if present) - (optional) NAND flash cache range (if at non-standard offset) -- reg-names : a list of the names corresponding to the previous register - ranges. Should contain "nand" and (optionally) - "flash-dma" or "flash-edu" and/or "nand-cache". -- interrupts : The NAND CTLRDY interrupt, (if Flash DMA is available) - FLASH_DMA_DONE and if EDU is avaialble and used FLASH_EDU_DONE -- interrupt-names : May be "nand_ctlrdy" or "flash_dma_done" or "flash_edu_done", - if broken out as individual interrupts. - May be "nand", if the SoC has the individual NAND - interrupts multiplexed behind another custom piece of - hardware -- #address-cells : <1> - subnodes give the chip-select number -- #size-cells : <0> - -Optional properties: -- clock : reference to the clock for the NAND controller -- clock-names : "nand" (required for the above clock) -- brcm,nand-has-wp : Some versions of this IP include a write-protect - (WP) control bit. It is always available on >= - v7.0. Use this property to describe the rare - earlier versions of this core that include WP - - -- Additional SoC-specific NAND controller properties -- - -The NAND controller is integrated differently on the variety of SoCs on which it -is found. Part of this integration involves providing status and enable bits -with which to control the 8 exposed NAND interrupts, as well as hardware for -configuring the endianness of the data bus. On some SoCs, these features are -handled via standard, modular components (e.g., their interrupts look like a -normal IRQ chip), but on others, they are controlled in unique and interesting -ways, sometimes with registers that lump multiple NAND-related functions -together. The former case can be described simply by the standard interrupts -properties in the main controller node. But for the latter exceptional cases, -we define additional 'compatible' properties and associated register resources within the NAND controller node above. - - - compatible: Can be one of several SoC-specific strings. Each SoC may have - different requirements for its additional properties, as described below each - bullet point below. - - * "brcm,nand-bcm63138" - - reg: (required) the 'NAND_INT_BASE' register range, with separate status - and enable registers - - reg-names: (required) "nand-int-base" - - * "brcm,nand-bcm6368" - - compatible: should contain "brcm,nand-bcm", "brcm,nand-bcm6368" - - reg: (required) the 'NAND_INTR_BASE' register range, with combined status - and enable registers, and boot address registers - - reg-names: (required) "nand-int-base" - - * "brcm,nand-iproc" - - reg: (required) the "IDM" register range, for interrupt enable and APB - bus access endianness configuration, and the "EXT" register range, - for interrupt status/ack. - - reg-names: (required) a list of the names corresponding to the previous - register ranges. Should contain "iproc-idm" and "iproc-ext". - - -* NAND chip-select - -Each controller (compatible: "brcm,brcmnand") may contain one or more subnodes -to represent enabled chip-selects which (may) contain NAND flash chips. Their -properties are as follows. - -Required properties: -- compatible : should contain "brcm,nandcs" -- reg : a single integer representing the chip-select - number (e.g., 0, 1, 2, etc.) -- #address-cells : see partition.txt -- #size-cells : see partition.txt - -Optional properties: -- nand-ecc-strength : see nand-controller.yaml -- nand-ecc-step-size : must be 512 or 1024. 
See nand-controller.yaml -- nand-on-flash-bbt : boolean, to enable the on-flash BBT for this - chip-select. See nand-controller.yaml -- brcm,nand-oob-sector-size : integer, to denote the spare area sector size - expected for the ECC layout in use. This size, in - addition to the strength and step-size, - determines how the hardware BCH engine will lay - out the parity bytes it stores on the flash. - This property can be automatically determined by - the flash geometry (particularly the NAND page - and OOB size) in many cases, but when booting - from NAND, the boot controller has only a limited - number of available options for its default ECC - layout. - -Each nandcs device node may optionally contain sub-nodes describing the flash -partition mapping. See partition.txt for more detail. - - -Example: - -nand@f0442800 { - compatible = "brcm,brcmnand-v7.0", "brcm,brcmnand"; - reg = <0xF0442800 0x600>, - <0xF0443000 0x100>; - reg-names = "nand", "flash-dma"; - interrupt-parent = <&hif_intr2_intc>; - interrupts = <24>, <4>; - - #address-cells = <1>; - #size-cells = <0>; - - nandcs@1 { - compatible = "brcm,nandcs"; - reg = <1>; // Chip select 1 - nand-on-flash-bbt; - nand-ecc-strength = <12>; - nand-ecc-step-size = <512>; - - // Partitions - #address-cells = <1>; // <2>, for 64-bit offset - #size-cells = <1>; // <2>, for 64-bit length - flash0.rootfs@0 { - reg = <0 0x10000000>; - }; - flash0@0 { - reg = <0 0>; // MTDPART_SIZ_FULL - }; - flash0.kernel@10000000 { - reg = <0x10000000 0x400000>; - }; - }; -}; - -nand@10000200 { - compatible = "brcm,nand-bcm63168", "brcm,nand-bcm6368", - "brcm,brcmnand-v4.0", "brcm,brcmnand"; - reg = <0x10000200 0x180>, - <0x10000600 0x200>, - <0x100000b0 0x10>; - reg-names = "nand", "nand-cache", "nand-int-base"; - interrupt-parent = <&periph_intc>; - interrupts = <50>; - clocks = <&periph_clk 20>; - clock-names = "nand"; - - #address-cells = <1>; - #size-cells = <0>; - - nand0: nandcs@0 { - compatible = "brcm,nandcs"; - reg = <0>; - nand-on-flash-bbt; - nand-ecc-strength = <1>; - nand-ecc-step-size = <512>; - }; -}; diff --git a/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml new file mode 100644 index 00000000000000..e5f1a33332a5af --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/brcm,brcmnand.yaml @@ -0,0 +1,242 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mtd/brcm,brcmnand.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Broadcom STB NAND Controller + +maintainers: + - Brian Norris + - Kamal Dasu + +description: | + The Broadcom Set-Top Box NAND controller supports low-level access to raw NAND + flash chips. It has a memory-mapped register interface for both control + registers and for its data input/output buffer. On some SoCs, this controller + is paired with a custom DMA engine (inventively named "Flash DMA") which + supports basic PROGRAM and READ functions, among other features. + + This controller was originally designed for STB SoCs (BCM7xxx) but is now + available on a variety of Broadcom SoCs, including some BCM3xxx, BCM63xx, and + iProc/Cygnus. Its history includes several similar (but not fully register + compatible) versions. + + -- Additional SoC-specific NAND controller properties -- + + The NAND controller is integrated differently on the variety of SoCs on which + it is found. 
Part of this integration involves providing status and enable + bits with which to control the 8 exposed NAND interrupts, as well as hardware + for configuring the endianness of the data bus. On some SoCs, these features + are handled via standard, modular components (e.g., their interrupts look like + a normal IRQ chip), but on others, they are controlled in unique and + interesting ways, sometimes with registers that lump multiple NAND-related + functions together. The former case can be described simply by the standard + interrupts properties in the main controller node. But for the latter + exceptional cases, we define additional 'compatible' properties and associated + register resources within the NAND controller node above. + +properties: + compatible: + oneOf: + - items: + - enum: + - brcm,brcmnand-v2.1 + - brcm,brcmnand-v2.2 + - brcm,brcmnand-v4.0 + - brcm,brcmnand-v5.0 + - brcm,brcmnand-v6.0 + - brcm,brcmnand-v6.1 + - brcm,brcmnand-v6.2 + - brcm,brcmnand-v7.0 + - brcm,brcmnand-v7.1 + - brcm,brcmnand-v7.2 + - brcm,brcmnand-v7.3 + - const: brcm,brcmnand + - description: BCM63138 SoC-specific NAND controller + items: + - const: brcm,nand-bcm63138 + - enum: + - brcm,brcmnand-v7.0 + - brcm,brcmnand-v7.1 + - const: brcm,brcmnand + - description: iProc SoC-specific NAND controller + items: + - const: brcm,nand-iproc + - const: brcm,brcmnand-v6.1 + - const: brcm,brcmnand + - description: BCM63168 SoC-specific NAND controller + items: + - const: brcm,nand-bcm63168 + - const: brcm,nand-bcm6368 + - const: brcm,brcmnand-v4.0 + - const: brcm,brcmnand + + reg: + minItems: 1 + maxItems: 6 + + reg-names: + minItems: 1 + maxItems: 6 + items: + enum: [ nand, flash-dma, flash-edu, nand-cache, nand-int-base, iproc-idm, iproc-ext ] + + interrupts: + minItems: 1 + maxItems: 3 + items: + - description: NAND CTLRDY interrupt + - description: FLASH_DMA_DONE if flash DMA is available + - description: FLASH_EDU_DONE if EDU is available + + interrupt-names: + minItems: 1 + maxItems: 3 + items: + - const: nand_ctlrdy + - const: flash_dma_done + - const: flash_edu_done + + clocks: + maxItems: 1 + description: reference to the clock for the NAND controller + + clock-names: + const: nand + + brcm,nand-has-wp: + description: > + Some versions of this IP include a write-protect + (WP) control bit. It is always available on >= + v7.0. Use this property to describe the rare + earlier versions of this core that include WP + type: boolean + +patternProperties: + "^nand@[a-f0-9]$": + type: object + properties: + compatible: + const: brcm,nandcs + + nand-ecc-step-size: + enum: [ 512, 1024 ] + + brcm,nand-oob-sector-size: + description: | + integer, to denote the spare area sector size + expected for the ECC layout in use. This size, in + addition to the strength and step-size, + determines how the hardware BCH engine will lay + out the parity bytes it stores on the flash. + This property can be automatically determined by + the flash geometry (particularly the NAND page + and OOB size) in many cases, but when booting + from NAND, the boot controller has only a limited + number of available options for its default ECC + layout. 
+ $ref: /schemas/types.yaml#/definitions/uint32 + +allOf: + - $ref: nand-controller.yaml# + - if: + properties: + compatible: + contains: + const: brcm,nand-bcm63138 + then: + properties: + reg-names: + minItems: 2 + maxItems: 2 + items: + - const: nand + - const: nand-int-base + - if: + properties: + compatible: + contains: + const: brcm,nand-bcm6368 + then: + properties: + reg-names: + minItems: 3 + maxItems: 3 + items: + - const: nand + - const: nand-int-base + - const: nand-cache + - if: + properties: + compatible: + contains: + const: brcm,nand-iproc + then: + properties: + reg-names: + minItems: 3 + maxItems: 3 + items: + - const: nand + - const: iproc-idm + - const: iproc-ext + +unevaluatedProperties: false + +required: + - reg + - reg-names + - interrupts + +examples: + - | + nand-controller@f0442800 { + compatible = "brcm,brcmnand-v7.0", "brcm,brcmnand"; + reg = <0xf0442800 0x600>, + <0xf0443000 0x100>; + reg-names = "nand", "flash-dma"; + interrupt-parent = <&hif_intr2_intc>; + interrupts = <24>, <4>; + + #address-cells = <1>; + #size-cells = <0>; + + nand@1 { + compatible = "brcm,nandcs"; + reg = <1>; // Chip select 1 + nand-on-flash-bbt; + nand-ecc-strength = <12>; + nand-ecc-step-size = <512>; + + #address-cells = <1>; + #size-cells = <1>; + }; + }; + - | + nand-controller@10000200 { + compatible = "brcm,nand-bcm63168", "brcm,nand-bcm6368", + "brcm,brcmnand-v4.0", "brcm,brcmnand"; + reg = <0x10000200 0x180>, + <0x100000b0 0x10>, + <0x10000600 0x200>; + reg-names = "nand", "nand-int-base", "nand-cache"; + interrupt-parent = <&periph_intc>; + interrupts = <50>; + clocks = <&periph_clk 20>; + clock-names = "nand"; + + #address-cells = <1>; + #size-cells = <0>; + + nand@0 { + compatible = "brcm,nandcs"; + reg = <0>; + nand-on-flash-bbt; + nand-ecc-strength = <1>; + nand-ecc-step-size = <512>; + + #address-cells = <1>; + #size-cells = <1>; + }; + }; diff --git a/Documentation/devicetree/bindings/mtd/common.txt b/Documentation/devicetree/bindings/mtd/common.txt index fc068b923d7aa3..ae16f9ea86064d 100644 --- a/Documentation/devicetree/bindings/mtd/common.txt +++ b/Documentation/devicetree/bindings/mtd/common.txt @@ -1,15 +1 @@ -* Common properties of all MTD devices - -Optional properties: -- label: user-defined MTD device name. Can be used to assign user - friendly names to MTD devices (instead of the flash model or flash - controller based name) in order to ease flash device identification - and/or describe what they are used for. - -Example: - - flash@0 { - label = "System-firmware"; - - /* flash type specific properties */ - }; +This file has been moved to mtd.yaml. 
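For reference, the named register ranges enumerated by the brcm,brcmnand schema above ("nand", "flash-dma", "nand-int-base", "iproc-idm", ...) are intended to be looked up by name rather than by position. A minimal sketch of that lookup, assuming a platform device and a hypothetical helper name (this is not the actual brcmnand probe code):

  #include <linux/io.h>
  #include <linux/ioport.h>
  #include <linux/platform_device.h>

  /* Hypothetical helper: map one of the ranges listed in reg-names. */
  static void __iomem *brcmnand_map_named(struct platform_device *pdev,
                                          const char *name)
  {
          struct resource *res;

          res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
          if (!res)
                  return IOMEM_ERR_PTR(-ENOENT); /* e.g. optional "flash-dma" */

          return devm_ioremap_resource(&pdev->dev, res);
  }

  /* Usage: base = brcmnand_map_named(pdev, "nand");
   *        idm  = brcmnand_map_named(pdev, "iproc-idm");  (iProc variants only)
   */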
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml index 5e7e5349f9a17f..ed590d7c6e3752 100644 --- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml +++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.yaml @@ -9,6 +9,9 @@ title: SPI NOR flash ST M25Pxx (and similar) serial flash chips maintainers: - Rob Herring +allOf: + - $ref: "mtd.yaml#" + properties: compatible: oneOf: @@ -82,6 +85,9 @@ patternProperties: '^partition@': type: object + "^otp(-[0-9]+)?$": + type: object + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml b/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml new file mode 100644 index 00000000000000..2cdf6bf3dc4ab2 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/microchip,mchp48l640.yaml @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/mtd/microchip,mchp48l640.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Microchip 48l640 (and similar) serial EERAM bindings + +maintainers: + - Heiko Schocher + +description: | + The Microchip 48l640 is a 8KByte EERAM connected via SPI. + + datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20006055B.pdf + +properties: + compatible: + items: + - const: microchip,48l640 + + reg: + maxItems: 1 + + spi-max-frequency: true + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + + eeram@0 { + compatible = "microchip,48l640"; + reg = <0>; + spi-max-frequency = <20000000>; + }; + }; +... diff --git a/Documentation/devicetree/bindings/mtd/mtd.yaml b/Documentation/devicetree/bindings/mtd/mtd.yaml new file mode 100644 index 00000000000000..376b679cfc701b --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/mtd.yaml @@ -0,0 +1,89 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mtd/mtd.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MTD (Memory Technology Device) Device Tree Bindings + +maintainers: + - Miquel Raynal + - Richard Weinberger + +properties: + $nodename: + pattern: "^flash(@.*)?$" + + label: + description: + User-defined MTD device name. Can be used to assign user friendly + names to MTD devices (instead of the flash model or flash controller + based name) in order to ease flash device identification and/or + describe what they are used for. + +patternProperties: + "^otp(-[0-9]+)?$": + type: object + $ref: ../nvmem/nvmem.yaml# + + description: | + An OTP memory region. Some flashes provide a one-time-programmable + memory whose content can either be programmed by a user or is already + pre-programmed by the factory. Some flashes might provide both. 
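Once a flash exposes such an OTP region, user space reaches it through the long-standing MTD OTP ioctls. A minimal user-space sketch, assuming the flash is /dev/mtd0 and an arbitrary 64-byte read (the device path and length are illustrative, not part of this binding):

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <mtd/mtd-user.h>       /* OTPSELECT, MTD_OTP_FACTORY, MTD_OTP_USER */

  int main(void)
  {
          unsigned char buf[64];
          int mode = MTD_OTP_FACTORY;     /* or MTD_OTP_USER */
          int fd = open("/dev/mtd0", O_RDONLY);
          ssize_t n;

          if (fd < 0 || ioctl(fd, OTPSELECT, &mode) < 0)
                  return 1;

          /* Further reads on this fd now hit the selected OTP area. */
          n = read(fd, buf, sizeof(buf));
          if (n > 0)
                  printf("read %zd OTP bytes\n", n);

          close(fd);
          return 0;
  }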
+ + properties: + compatible: + enum: + - user-otp + - factory-otp + + required: + - compatible + +additionalProperties: true + +examples: + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + + flash@0 { + reg = <0>; + compatible = "jedec,spi-nor"; + label = "System-firmware"; + }; + }; + + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + + flash@0 { + reg = <0>; + compatible = "jedec,spi-nor"; + + otp-1 { + compatible = "factory-otp"; + #address-cells = <1>; + #size-cells = <1>; + + electronic-serial-number@0 { + reg = <0 8>; + }; + }; + + otp-2 { + compatible = "user-otp"; + #address-cells = <1>; + #size-cells = <1>; + + mac-address@0 { + reg = <0 6>; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/mtd/nand-controller.yaml b/Documentation/devicetree/bindings/mtd/nand-controller.yaml index 678b3995250264..bd217e6f5018a0 100644 --- a/Documentation/devicetree/bindings/mtd/nand-controller.yaml +++ b/Documentation/devicetree/bindings/mtd/nand-controller.yaml @@ -38,6 +38,17 @@ properties: ranges: true + cs-gpios: + minItems: 1 + maxItems: 8 + description: + Array of chip-select available to the controller. The first + entries are a 1:1 mapping of the available chip-select on the + NAND controller (even if they are not used). As many additional + chip-select as needed may follow and should be phandles of GPIO + lines. 'reg' entries of the NAND chip subnodes become indexes of + this array when this property is present. + patternProperties: "^nand@[a-f0-9]$": type: object @@ -164,14 +175,19 @@ examples: nand-controller { #address-cells = <1>; #size-cells = <0>; + cs-gpios = <0>, <&gpioA 1>; /* A single native CS is available */ /* controller specific properties */ nand@0 { - reg = <0>; + reg = <0>; /* Native CS */ nand-use-soft-ecc-engine; nand-ecc-algo = "bch"; /* controller specific properties */ }; + + nand@1 { + reg = <1>; /* GPIO CS */ + }; }; diff --git a/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt b/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt index b677147ca4e108..c2175d3c82ecf5 100644 --- a/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt +++ b/Documentation/devicetree/bindings/mtd/partitions/brcm,trx.txt @@ -28,6 +28,11 @@ detected by a software parsing TRX header. Required properties: - compatible : (required) must be "brcm,trx" +Optional properties: + +- brcm,trx-magic: TRX magic, if it is different from the default magic + 0x30524448 as a u32. + Example: flash@0 { diff --git a/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt b/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt deleted file mode 100644 index ccfd37b8a0ad3e..00000000000000 --- a/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.txt +++ /dev/null @@ -1,51 +0,0 @@ -Bindings for HyperBus Memory Controller (HBMC) on TI's K3 family of SoCs - -Required properties: -- compatible : "ti,am654-hbmc" for AM654 SoC -- reg : Two entries: - First entry pointed to the register space of HBMC controller - Second entry pointing to the memory map region dedicated for - MMIO access to attached flash devices -- ranges : Address translation from offset within CS to allocated MMIO - space in SoC - -Optional properties: -- mux-controls : phandle to the multiplexer that controls selection of - HBMC vs OSPI inside Flash SubSystem (FSS). Default is OSPI, - if property is absent. 
- See Documentation/devicetree/bindings/mux/reg-mux.yaml - for mmio-mux binding details - -Example: - - system-controller@47000000 { - compatible = "syscon", "simple-mfd"; - reg = <0x0 0x47000000 0x0 0x100>; - #address-cells = <2>; - #size-cells = <2>; - ranges; - - hbmc_mux: multiplexer { - compatible = "mmio-mux"; - #mux-control-cells = <1>; - mux-reg-masks = <0x4 0x2>; /* 0: reg 0x4, bit 1 */ - }; - }; - - hbmc: hyperbus@47034000 { - compatible = "ti,am654-hbmc"; - reg = <0x0 0x47034000 0x0 0x100>, - <0x5 0x00000000 0x1 0x0000000>; - power-domains = <&k3_pds 55>; - #address-cells = <2>; - #size-cells = <1>; - ranges = <0x0 0x0 0x5 0x00000000 0x4000000>, /* CS0 - 64MB */ - <0x1 0x0 0x5 0x04000000 0x4000000>; /* CS1 - 64MB */ - mux-controls = <&hbmc_mux 0>; - - /* Slave flash node */ - flash@0,0 { - compatible = "cypress,hyperflash", "cfi-flash"; - reg = <0x0 0x0 0x4000000>; - }; - }; diff --git a/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.yaml b/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.yaml new file mode 100644 index 00000000000000..30b458c41cac39 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/ti,am654-hbmc.yaml @@ -0,0 +1,69 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/mtd/ti,am654-hbmc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: HyperBus Memory Controller (HBMC) on TI's K3 family of SoCs + +maintainers: + - Vignesh Raghavendra + +properties: + compatible: + const: ti,am654-hbmc + + reg: + maxItems: 2 + + power-domains: true + '#address-cells': true + '#size-cells': true + ranges: true + + mux-controls: + description: MMIO mux controller node to select b/w OSPI and HBMC. + + clocks: + maxItems: 1 + +patternProperties: + "^flash@[0-1],[0-9a-f]+$": + type: object + +required: + - compatible + - reg + - ranges + - clocks + - '#address-cells' + - '#size-cells' + +additionalProperties: false + +examples: + - | + bus { + #address-cells = <2>; + #size-cells = <2>; + + hbmc: memory-controller@47034000 { + compatible = "ti,am654-hbmc"; + reg = <0x0 0x47034000 0x0 0x100>, + <0x5 0x00000000 0x1 0x0000000>; + ranges = <0x0 0x0 0x5 0x00000000 0x4000000>, /* CS0 - 64MB */ + <0x1 0x0 0x5 0x04000000 0x4000000>; /* CS1 - 64MB */ + clocks = <&k3_clks 102 0>; + #address-cells = <2>; + #size-cells = <1>; + power-domains = <&k3_pds 55>; + mux-controls = <&hbmc_mux 0>; + + flash@0,0 { + compatible = "cypress,hyperflash", "cfi-flash"; + reg = <0x0 0x0 0x4000000>; + #address-cells = <1>; + #size-cells = <1>; + }; + }; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 3ccd3b0d78326e..7b9c0197b2a77d 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1319,6 +1319,7 @@ W: http://www.aquantia.com F: drivers/net/ethernet/aquantia/atlantic/aq_ptp* ARASAN NAND CONTROLLER DRIVER +M: Miquel Raynal M: Naga Sureshkumar Relli L: linux-mtd@lists.infradead.org S: Maintained @@ -1460,6 +1461,22 @@ S: Odd Fixes F: drivers/amba/ F: include/linux/amba/bus.h +ARM PRIMECELL PL35X NAND CONTROLLER DRIVER +M: Miquel Raynal +M: Naga Sureshkumar Relli +L: linux-mtd@lists.infradead.org +S: Maintained +F: Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml +F: drivers/mtd/nand/raw/pl35x-nand-controller.c + +ARM PRIMECELL PL35X SMC DRIVER +M: Miquel Raynal +M: Naga Sureshkumar Relli +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S: Maintained +F: Documentation/devicetree/bindings/mtd/arm,pl353-smc.yaml +F: drivers/memory/pl353-smc.c + ARM PRIMECELL CLCD PL110 DRIVER M: Russell 
King S: Odd Fixes diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c index 9c0a2841677739..925d856663ac8d 100644 --- a/drivers/memory/pl353-smc.c +++ b/drivers/memory/pl353-smc.c @@ -8,263 +8,22 @@ */ #include -#include #include #include #include #include -#include -#include #include -/* Register definitions */ -#define PL353_SMC_MEMC_STATUS_OFFS 0 /* Controller status reg, RO */ -#define PL353_SMC_CFG_CLR_OFFS 0xC /* Clear config reg, WO */ -#define PL353_SMC_DIRECT_CMD_OFFS 0x10 /* Direct command reg, WO */ -#define PL353_SMC_SET_CYCLES_OFFS 0x14 /* Set cycles register, WO */ -#define PL353_SMC_SET_OPMODE_OFFS 0x18 /* Set opmode register, WO */ -#define PL353_SMC_ECC_STATUS_OFFS 0x400 /* ECC status register */ -#define PL353_SMC_ECC_MEMCFG_OFFS 0x404 /* ECC mem config reg */ -#define PL353_SMC_ECC_MEMCMD1_OFFS 0x408 /* ECC mem cmd1 reg */ -#define PL353_SMC_ECC_MEMCMD2_OFFS 0x40C /* ECC mem cmd2 reg */ -#define PL353_SMC_ECC_VALUE0_OFFS 0x418 /* ECC value 0 reg */ - -/* Controller status register specific constants */ -#define PL353_SMC_MEMC_STATUS_RAW_INT_1_SHIFT 6 - -/* Clear configuration register specific constants */ -#define PL353_SMC_CFG_CLR_INT_CLR_1 0x10 -#define PL353_SMC_CFG_CLR_ECC_INT_DIS_1 0x40 -#define PL353_SMC_CFG_CLR_INT_DIS_1 0x2 -#define PL353_SMC_CFG_CLR_DEFAULT_MASK (PL353_SMC_CFG_CLR_INT_CLR_1 | \ - PL353_SMC_CFG_CLR_ECC_INT_DIS_1 | \ - PL353_SMC_CFG_CLR_INT_DIS_1) - -/* Set cycles register specific constants */ -#define PL353_SMC_SET_CYCLES_T0_MASK 0xF -#define PL353_SMC_SET_CYCLES_T0_SHIFT 0 -#define PL353_SMC_SET_CYCLES_T1_MASK 0xF -#define PL353_SMC_SET_CYCLES_T1_SHIFT 4 -#define PL353_SMC_SET_CYCLES_T2_MASK 0x7 -#define PL353_SMC_SET_CYCLES_T2_SHIFT 8 -#define PL353_SMC_SET_CYCLES_T3_MASK 0x7 -#define PL353_SMC_SET_CYCLES_T3_SHIFT 11 -#define PL353_SMC_SET_CYCLES_T4_MASK 0x7 -#define PL353_SMC_SET_CYCLES_T4_SHIFT 14 -#define PL353_SMC_SET_CYCLES_T5_MASK 0x7 -#define PL353_SMC_SET_CYCLES_T5_SHIFT 17 -#define PL353_SMC_SET_CYCLES_T6_MASK 0xF -#define PL353_SMC_SET_CYCLES_T6_SHIFT 20 - -/* ECC status register specific constants */ -#define PL353_SMC_ECC_STATUS_BUSY BIT(6) -#define PL353_SMC_ECC_REG_SIZE_OFFS 4 - -/* ECC memory config register specific constants */ -#define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC -#define PL353_SMC_ECC_MEMCFG_MODE_SHIFT 2 -#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK 0x3 - -#define PL353_SMC_DC_UPT_NAND_REGS ((4 << 23) | /* CS: NAND chip */ \ - (2 << 21)) /* UpdateRegs operation */ - -#define PL353_NAND_ECC_CMD1 ((0x80) | /* Write command */ \ - (0 << 8) | /* Read command */ \ - (0x30 << 16) | /* Read End command */ \ - (1 << 24)) /* Read End command calid */ - -#define PL353_NAND_ECC_CMD2 ((0x85) | /* Write col change cmd */ \ - (5 << 8) | /* Read col change cmd */ \ - (0xE0 << 16) | /* Read col change end cmd */ \ - (1 << 24)) /* Read col change end cmd valid */ -#define PL353_NAND_ECC_BUSY_TIMEOUT (1 * HZ) /** * struct pl353_smc_data - Private smc driver structure * @memclk: Pointer to the peripheral clock - * @aclk: Pointer to the APER clock + * @aclk: Pointer to the AXI peripheral clock */ struct pl353_smc_data { struct clk *memclk; struct clk *aclk; }; -/* SMC virtual register base */ -static void __iomem *pl353_smc_base; - -/** - * pl353_smc_set_buswidth - Set memory buswidth - * @bw: Memory buswidth (8 | 16) - * Return: 0 on success or negative errno. 
- */ -int pl353_smc_set_buswidth(unsigned int bw) -{ - if (bw != PL353_SMC_MEM_WIDTH_8 && bw != PL353_SMC_MEM_WIDTH_16) - return -EINVAL; - - writel(bw, pl353_smc_base + PL353_SMC_SET_OPMODE_OFFS); - writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base + - PL353_SMC_DIRECT_CMD_OFFS); - - return 0; -} -EXPORT_SYMBOL_GPL(pl353_smc_set_buswidth); - -/** - * pl353_smc_set_cycles - Set memory timing parameters - * @timings: NAND controller timing parameters - * - * Sets NAND chip specific timing parameters. - */ -void pl353_smc_set_cycles(u32 timings[]) -{ - /* - * Set write pulse timing. This one is easy to extract: - * - * NWE_PULSE = tWP - */ - timings[0] &= PL353_SMC_SET_CYCLES_T0_MASK; - timings[1] = (timings[1] & PL353_SMC_SET_CYCLES_T1_MASK) << - PL353_SMC_SET_CYCLES_T1_SHIFT; - timings[2] = (timings[2] & PL353_SMC_SET_CYCLES_T2_MASK) << - PL353_SMC_SET_CYCLES_T2_SHIFT; - timings[3] = (timings[3] & PL353_SMC_SET_CYCLES_T3_MASK) << - PL353_SMC_SET_CYCLES_T3_SHIFT; - timings[4] = (timings[4] & PL353_SMC_SET_CYCLES_T4_MASK) << - PL353_SMC_SET_CYCLES_T4_SHIFT; - timings[5] = (timings[5] & PL353_SMC_SET_CYCLES_T5_MASK) << - PL353_SMC_SET_CYCLES_T5_SHIFT; - timings[6] = (timings[6] & PL353_SMC_SET_CYCLES_T6_MASK) << - PL353_SMC_SET_CYCLES_T6_SHIFT; - timings[0] |= timings[1] | timings[2] | timings[3] | - timings[4] | timings[5] | timings[6]; - - writel(timings[0], pl353_smc_base + PL353_SMC_SET_CYCLES_OFFS); - writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base + - PL353_SMC_DIRECT_CMD_OFFS); -} -EXPORT_SYMBOL_GPL(pl353_smc_set_cycles); - -/** - * pl353_smc_ecc_is_busy - Read ecc busy flag - * Return: the ecc_status bit from the ecc_status register. 1 = busy, 0 = idle - */ -bool pl353_smc_ecc_is_busy(void) -{ - return ((readl(pl353_smc_base + PL353_SMC_ECC_STATUS_OFFS) & - PL353_SMC_ECC_STATUS_BUSY) == PL353_SMC_ECC_STATUS_BUSY); -} -EXPORT_SYMBOL_GPL(pl353_smc_ecc_is_busy); - -/** - * pl353_smc_get_ecc_val - Read ecc_valueN registers - * @ecc_reg: Index of the ecc_value reg (0..3) - * Return: the content of the requested ecc_value register. - * - * There are four valid ecc_value registers. The argument is truncated to stay - * within this valid boundary. - */ -u32 pl353_smc_get_ecc_val(int ecc_reg) -{ - u32 addr, reg; - - addr = PL353_SMC_ECC_VALUE0_OFFS + - (ecc_reg * PL353_SMC_ECC_REG_SIZE_OFFS); - reg = readl(pl353_smc_base + addr); - - return reg; -} -EXPORT_SYMBOL_GPL(pl353_smc_get_ecc_val); - -/** - * pl353_smc_get_nand_int_status_raw - Get NAND interrupt status bit - * Return: the raw_int_status1 bit from the memc_status register - */ -int pl353_smc_get_nand_int_status_raw(void) -{ - u32 reg; - - reg = readl(pl353_smc_base + PL353_SMC_MEMC_STATUS_OFFS); - reg >>= PL353_SMC_MEMC_STATUS_RAW_INT_1_SHIFT; - reg &= 1; - - return reg; -} -EXPORT_SYMBOL_GPL(pl353_smc_get_nand_int_status_raw); - -/** - * pl353_smc_clr_nand_int - Clear NAND interrupt - */ -void pl353_smc_clr_nand_int(void) -{ - writel(PL353_SMC_CFG_CLR_INT_CLR_1, - pl353_smc_base + PL353_SMC_CFG_CLR_OFFS); -} -EXPORT_SYMBOL_GPL(pl353_smc_clr_nand_int); - -/** - * pl353_smc_set_ecc_mode - Set SMC ECC mode - * @mode: ECC mode (BYPASS, APB, MEM) - * Return: 0 on success or negative errno. 
- */ -int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode) -{ - u32 reg; - int ret = 0; - - switch (mode) { - case PL353_SMC_ECCMODE_BYPASS: - case PL353_SMC_ECCMODE_APB: - case PL353_SMC_ECCMODE_MEM: - - reg = readl(pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); - reg &= ~PL353_SMC_ECC_MEMCFG_MODE_MASK; - reg |= mode << PL353_SMC_ECC_MEMCFG_MODE_SHIFT; - writel(reg, pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); - - break; - default: - ret = -EINVAL; - } - - return ret; -} -EXPORT_SYMBOL_GPL(pl353_smc_set_ecc_mode); - -/** - * pl353_smc_set_ecc_pg_size - Set SMC ECC page size - * @pg_sz: ECC page size - * Return: 0 on success or negative errno. - */ -int pl353_smc_set_ecc_pg_size(unsigned int pg_sz) -{ - u32 reg, sz; - - switch (pg_sz) { - case 0: - sz = 0; - break; - case SZ_512: - sz = 1; - break; - case SZ_1K: - sz = 2; - break; - case SZ_2K: - sz = 3; - break; - default: - return -EINVAL; - } - - reg = readl(pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); - reg &= ~PL353_SMC_ECC_MEMCFG_PGSIZE_MASK; - reg |= sz; - writel(reg, pl353_smc_base + PL353_SMC_ECC_MEMCFG_OFFS); - - return 0; -} -EXPORT_SYMBOL_GPL(pl353_smc_set_ecc_pg_size); - static int __maybe_unused pl353_smc_suspend(struct device *dev) { struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev); @@ -277,8 +36,8 @@ static int __maybe_unused pl353_smc_suspend(struct device *dev) static int __maybe_unused pl353_smc_resume(struct device *dev) { - int ret; struct pl353_smc_data *pl353_smc = dev_get_drvdata(dev); + int ret; ret = clk_enable(pl353_smc->aclk); if (ret) { @@ -296,77 +55,31 @@ static int __maybe_unused pl353_smc_resume(struct device *dev) return ret; } -static struct amba_driver pl353_smc_driver; - static SIMPLE_DEV_PM_OPS(pl353_smc_dev_pm_ops, pl353_smc_suspend, pl353_smc_resume); -/** - * pl353_smc_init_nand_interface - Initialize the NAND interface - * @adev: Pointer to the amba_device struct - * @nand_node: Pointer to the pl353_nand device_node struct - */ -static void pl353_smc_init_nand_interface(struct amba_device *adev, - struct device_node *nand_node) -{ - unsigned long timeout; - - pl353_smc_set_buswidth(PL353_SMC_MEM_WIDTH_8); - writel(PL353_SMC_CFG_CLR_INT_CLR_1, - pl353_smc_base + PL353_SMC_CFG_CLR_OFFS); - writel(PL353_SMC_DC_UPT_NAND_REGS, pl353_smc_base + - PL353_SMC_DIRECT_CMD_OFFS); - - timeout = jiffies + PL353_NAND_ECC_BUSY_TIMEOUT; - /* Wait till the ECC operation is complete */ - do { - if (pl353_smc_ecc_is_busy()) - cpu_relax(); - else - break; - } while (!time_after_eq(jiffies, timeout)); - - if (time_after_eq(jiffies, timeout)) - return; - - writel(PL353_NAND_ECC_CMD1, - pl353_smc_base + PL353_SMC_ECC_MEMCMD1_OFFS); - writel(PL353_NAND_ECC_CMD2, - pl353_smc_base + PL353_SMC_ECC_MEMCMD2_OFFS); -} - static const struct of_device_id pl353_smc_supported_children[] = { { .compatible = "cfi-flash" }, { .compatible = "arm,pl353-nand-r2p1", - .data = pl353_smc_init_nand_interface }, {} }; static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) { + struct device_node *of_node = adev->dev.of_node; + const struct of_device_id *match = NULL; struct pl353_smc_data *pl353_smc; struct device_node *child; - struct resource *res; int err; - struct device_node *of_node = adev->dev.of_node; - static void (*init)(struct amba_device *adev, - struct device_node *nand_node); - const struct of_device_id *match = NULL; pl353_smc = devm_kzalloc(&adev->dev, sizeof(*pl353_smc), GFP_KERNEL); if (!pl353_smc) return -ENOMEM; - /* Get the NAND controller virtual address */ - res = &adev->res; - 
pl353_smc_base = devm_ioremap_resource(&adev->dev, res); - if (IS_ERR(pl353_smc_base)) - return PTR_ERR(pl353_smc_base); - pl353_smc->aclk = devm_clk_get(&adev->dev, "apb_pclk"); if (IS_ERR(pl353_smc->aclk)) { dev_err(&adev->dev, "aclk clock not found.\n"); @@ -388,15 +101,11 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) err = clk_prepare_enable(pl353_smc->memclk); if (err) { dev_err(&adev->dev, "Unable to enable memory clock.\n"); - goto out_clk_dis_aper; + goto disable_axi_clk; } amba_set_drvdata(adev, pl353_smc); - /* clear interrupts */ - writel(PL353_SMC_CFG_CLR_DEFAULT_MASK, - pl353_smc_base + PL353_SMC_CFG_CLR_OFFS); - /* Find compatible children. Only a single child is supported */ for_each_available_child_of_node(of_node, child) { match = of_match_node(pl353_smc_supported_children, child); @@ -408,19 +117,16 @@ static int pl353_smc_probe(struct amba_device *adev, const struct amba_id *id) } if (!match) { dev_err(&adev->dev, "no matching children\n"); - goto out_clk_disable; + goto disable_mem_clk; } - init = match->data; - if (init) - init(adev, child); of_platform_device_create(child, NULL, &adev->dev); return 0; -out_clk_disable: +disable_mem_clk: clk_disable_unprepare(pl353_smc->memclk); -out_clk_dis_aper: +disable_axi_clk: clk_disable_unprepare(pl353_smc->aclk); return err; @@ -436,8 +142,8 @@ static void pl353_smc_remove(struct amba_device *adev) static const struct amba_id pl353_ids[] = { { - .id = 0x00041353, - .mask = 0x000fffff, + .id = 0x00041353, + .mask = 0x000fffff, }, { 0, 0 }, }; diff --git a/drivers/mtd/chips/chipreg.c b/drivers/mtd/chips/chipreg.c index ff86373d7d2439..a05e103682a483 100644 --- a/drivers/mtd/chips/chipreg.c +++ b/drivers/mtd/chips/chipreg.c @@ -31,14 +31,11 @@ void unregister_mtd_chip_driver(struct mtd_chip_driver *drv) static struct mtd_chip_driver *get_mtd_chip_driver (const char *name) { - struct list_head *pos; struct mtd_chip_driver *ret = NULL, *this; spin_lock(&chip_drvs_lock); - list_for_each(pos, &chip_drvs_list) { - this = list_entry(pos, typeof(*this), list); - + list_for_each_entry(this, &chip_drvs_list, list) { if (!strcmp(this->name, name)) { ret = this; break; diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 0f4c2d823de84a..79cb981ececc9a 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -89,6 +89,12 @@ config MTD_MCHP23K256 platform data, or a device tree description if you want to specify device partitioning +config MTD_MCHP48L640 + tristate "Microchip 48L640 EERAM" + depends on SPI_MASTER + help + This enables access to Microchip 48L640 EERAM chips, using SPI. 
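The resulting device registers as MTD_RAM (see the driver added below), so it can be written byte-wise through the MTD character device with no erase step. A small user-space sketch, assuming the EERAM happens to probe as /dev/mtd1 (the index depends on the system):

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
          char out[12] = "hello eeram", in[12] = "";
          /* /dev/mtd1 is only an example; mtd numbering is system dependent. */
          int fd = open("/dev/mtd1", O_RDWR);

          if (fd < 0)
                  return 1;

          /* EERAM behaves like RAM: write, then read back, at any offset. */
          if (pwrite(fd, out, sizeof(out), 0x100) != sizeof(out) ||
              pread(fd, in, sizeof(in), 0x100) != sizeof(in))
                  return 1;

          printf("%s\n", in);
          close(fd);
          return 0;
  }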
+ config MTD_SPEAR_SMI tristate "SPEAR MTD NOR Support through SMI controller" depends on PLAT_SPEAR || COMPILE_TEST diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index 991c8d12c01605..0362cf6bdc67f0 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_MTD_LART) += lart.o obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o obj-$(CONFIG_MTD_MCHP23K256) += mchp23k256.o +obj-$(CONFIG_MTD_MCHP48L640) += mchp48l640.o obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o obj-$(CONFIG_MTD_SST25L) += sst25l.o obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o diff --git a/drivers/mtd/devices/mchp48l640.c b/drivers/mtd/devices/mchp48l640.c new file mode 100644 index 00000000000000..efc2003bd13a47 --- /dev/null +++ b/drivers/mtd/devices/mchp48l640.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Microchip 48L640 64 Kb SPI Serial EERAM + * + * Copyright Heiko Schocher + * + * datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20006055B.pdf + * + * we set continuous mode but reading/writing more bytes than + * pagesize seems to bring chip into state where readden values + * are wrong ... no idea why. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct mchp48_caps { + unsigned int size; + unsigned int page_size; +}; + +struct mchp48l640_flash { + struct spi_device *spi; + struct mutex lock; + struct mtd_info mtd; + const struct mchp48_caps *caps; +}; + +#define MCHP48L640_CMD_WREN 0x06 +#define MCHP48L640_CMD_WRDI 0x04 +#define MCHP48L640_CMD_WRITE 0x02 +#define MCHP48L640_CMD_READ 0x03 +#define MCHP48L640_CMD_WRSR 0x01 +#define MCHP48L640_CMD_RDSR 0x05 + +#define MCHP48L640_STATUS_RDY 0x01 +#define MCHP48L640_STATUS_WEL 0x02 +#define MCHP48L640_STATUS_BP0 0x04 +#define MCHP48L640_STATUS_BP1 0x08 +#define MCHP48L640_STATUS_SWM 0x10 +#define MCHP48L640_STATUS_PRO 0x20 +#define MCHP48L640_STATUS_ASE 0x40 + +#define MCHP48L640_TIMEOUT 100 + +#define MAX_CMD_SIZE 0x10 + +#define to_mchp48l640_flash(x) container_of(x, struct mchp48l640_flash, mtd) + +static int mchp48l640_mkcmd(struct mchp48l640_flash *flash, u8 cmd, loff_t addr, char *buf) +{ + buf[0] = cmd; + buf[1] = addr >> 8; + buf[2] = addr; + + return 3; +} + +static int mchp48l640_read_status(struct mchp48l640_flash *flash, int *status) +{ + unsigned char cmd[2]; + int ret; + + cmd[0] = MCHP48L640_CMD_RDSR; + cmd[1] = 0x00; + mutex_lock(&flash->lock); + ret = spi_write_then_read(flash->spi, &cmd[0], 1, &cmd[1], 1); + mutex_unlock(&flash->lock); + if (!ret) + *status = cmd[1]; + dev_dbg(&flash->spi->dev, "read status ret: %d status: %x", ret, *status); + + return ret; +} + +static int mchp48l640_waitforbit(struct mchp48l640_flash *flash, int bit, bool set) +{ + int ret, status; + unsigned long deadline; + + deadline = jiffies + msecs_to_jiffies(MCHP48L640_TIMEOUT); + do { + ret = mchp48l640_read_status(flash, &status); + dev_dbg(&flash->spi->dev, "read status ret: %d bit: %x %sset status: %x", + ret, bit, (set ? "" : "not"), status); + if (ret) + return ret; + + if (set) { + if ((status & bit) == bit) + return 0; + } else { + if ((status & bit) == 0) + return 0; + } + + usleep_range(1000, 2000); + } while (!time_after_eq(jiffies, deadline)); + + dev_err(&flash->spi->dev, "Timeout waiting for bit %x %s set in status register.", + bit, (set ? 
"" : "not")); + return -ETIMEDOUT; +} + +static int mchp48l640_write_prepare(struct mchp48l640_flash *flash, bool enable) +{ + unsigned char cmd[2]; + int ret; + + if (enable) + cmd[0] = MCHP48L640_CMD_WREN; + else + cmd[0] = MCHP48L640_CMD_WRDI; + + mutex_lock(&flash->lock); + ret = spi_write(flash->spi, cmd, 1); + mutex_unlock(&flash->lock); + + if (ret) + dev_err(&flash->spi->dev, "write %sable failed ret: %d", + (enable ? "en" : "dis"), ret); + + dev_dbg(&flash->spi->dev, "write %sable success ret: %d", + (enable ? "en" : "dis"), ret); + if (enable) + return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, true); + + return ret; +} + +static int mchp48l640_set_mode(struct mchp48l640_flash *flash) +{ + unsigned char cmd[2]; + int ret; + + ret = mchp48l640_write_prepare(flash, true); + if (ret) + return ret; + + cmd[0] = MCHP48L640_CMD_WRSR; + cmd[1] = MCHP48L640_STATUS_PRO; + + mutex_lock(&flash->lock); + ret = spi_write(flash->spi, cmd, 2); + mutex_unlock(&flash->lock); + if (ret) + dev_err(&flash->spi->dev, "Could not set continuous mode ret: %d", ret); + + return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_PRO, true); +} + +static int mchp48l640_wait_rdy(struct mchp48l640_flash *flash) +{ + return mchp48l640_waitforbit(flash, MCHP48L640_STATUS_RDY, false); +}; + +static int mchp48l640_write_page(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const unsigned char *buf) +{ + struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd); + unsigned char *cmd; + int ret; + int cmdlen; + + cmd = kmalloc((3 + len), GFP_KERNEL | GFP_DMA); + if (!cmd) + return -ENOMEM; + + ret = mchp48l640_wait_rdy(flash); + if (ret) + goto fail; + + ret = mchp48l640_write_prepare(flash, true); + if (ret) + goto fail; + + mutex_lock(&flash->lock); + cmdlen = mchp48l640_mkcmd(flash, MCHP48L640_CMD_WRITE, to, cmd); + memcpy(&cmd[cmdlen], buf, len); + ret = spi_write(flash->spi, cmd, cmdlen + len); + mutex_unlock(&flash->lock); + if (!ret) + *retlen += len; + else + goto fail; + + ret = mchp48l640_waitforbit(flash, MCHP48L640_STATUS_WEL, false); + if (ret) + goto fail; + + kfree(cmd); + return 0; +fail: + kfree(cmd); + dev_err(&flash->spi->dev, "write fail with: %d", ret); + return ret; +}; + +static int mchp48l640_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const unsigned char *buf) +{ + struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd); + int ret; + size_t wlen = 0; + loff_t woff = to; + size_t ws; + size_t page_sz = flash->caps->page_size; + + /* + * we set PRO bit (page rollover), but writing length > page size + * does result in total chaos, so write in 32 byte chunks. 
+ */ + while (wlen < len) { + ws = min((len - wlen), page_sz); + ret = mchp48l640_write_page(mtd, woff, ws, retlen, &buf[wlen]); + if (ret) + return ret; + wlen += ws; + woff += ws; + } + + return ret; +} + +static int mchp48l640_read_page(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, unsigned char *buf) +{ + struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd); + unsigned char *cmd; + int ret; + int cmdlen; + + cmd = kmalloc((3 + len), GFP_KERNEL | GFP_DMA); + if (!cmd) + return -ENOMEM; + + ret = mchp48l640_wait_rdy(flash); + if (ret) + goto fail; + + mutex_lock(&flash->lock); + cmdlen = mchp48l640_mkcmd(flash, MCHP48L640_CMD_READ, from, cmd); + ret = spi_write_then_read(flash->spi, cmd, cmdlen, buf, len); + mutex_unlock(&flash->lock); + if (!ret) + *retlen += len; + + return ret; + +fail: + kfree(cmd); + dev_err(&flash->spi->dev, "read fail with: %d", ret); + return ret; +} + +static int mchp48l640_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, unsigned char *buf) +{ + struct mchp48l640_flash *flash = to_mchp48l640_flash(mtd); + int ret; + size_t wlen = 0; + loff_t woff = from; + size_t ws; + size_t page_sz = flash->caps->page_size; + + /* + * we set PRO bit (page rollover), but if read length > page size + * does result in total chaos in result ... + */ + while (wlen < len) { + ws = min((len - wlen), page_sz); + ret = mchp48l640_read_page(mtd, woff, ws, retlen, &buf[wlen]); + if (ret) + return ret; + wlen += ws; + woff += ws; + } + + return ret; +}; + +static const struct mchp48_caps mchp48l640_caps = { + .size = SZ_8K, + .page_size = 32, +}; + +static int mchp48l640_probe(struct spi_device *spi) +{ + struct mchp48l640_flash *flash; + struct flash_platform_data *data; + int err; + int status; + + flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL); + if (!flash) + return -ENOMEM; + + flash->spi = spi; + mutex_init(&flash->lock); + spi_set_drvdata(spi, flash); + + err = mchp48l640_read_status(flash, &status); + if (err) + return err; + + err = mchp48l640_set_mode(flash); + if (err) + return err; + + data = dev_get_platdata(&spi->dev); + + flash->caps = of_device_get_match_data(&spi->dev); + if (!flash->caps) + flash->caps = &mchp48l640_caps; + + mtd_set_of_node(&flash->mtd, spi->dev.of_node); + flash->mtd.dev.parent = &spi->dev; + flash->mtd.type = MTD_RAM; + flash->mtd.flags = MTD_CAP_RAM; + flash->mtd.writesize = flash->caps->page_size; + flash->mtd.size = flash->caps->size; + flash->mtd._read = mchp48l640_read; + flash->mtd._write = mchp48l640_write; + + err = mtd_device_register(&flash->mtd, data ? data->parts : NULL, + data ? 
data->nr_parts : 0); + if (err) + return err; + + return 0; +} + +static int mchp48l640_remove(struct spi_device *spi) +{ + struct mchp48l640_flash *flash = spi_get_drvdata(spi); + + return mtd_device_unregister(&flash->mtd); +} + +static const struct of_device_id mchp48l640_of_table[] = { + { + .compatible = "microchip,48l640", + .data = &mchp48l640_caps, + }, + {} +}; +MODULE_DEVICE_TABLE(of, mchp48l640_of_table); + +static struct spi_driver mchp48l640_driver = { + .driver = { + .name = "mchp48l640", + .of_match_table = of_match_ptr(mchp48l640_of_table), + }, + .probe = mchp48l640_probe, + .remove = mchp48l640_remove, +}; + +module_spi_driver(mchp48l640_driver); + +MODULE_DESCRIPTION("MTD SPI driver for Microchip 48l640 EERAM chips"); +MODULE_AUTHOR("Heiko Schocher "); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("spi:mchp48l640"); diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c index fb4a6aa24543fe..08f76ff839a787 100644 --- a/drivers/mtd/devices/ms02-nv.c +++ b/drivers/mtd/devices/ms02-nv.c @@ -286,7 +286,6 @@ static int __init ms02nv_init(void) break; default: return -ENODEV; - break; } for (i = 0; i < ARRAY_SIZE(ms02nv_addrs); i++) diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c index 5b04ae6c30573b..6ed6c51fac69e8 100644 --- a/drivers/mtd/devices/phram.c +++ b/drivers/mtd/devices/phram.c @@ -270,6 +270,7 @@ static int phram_setup(const char *val) if (len == 0 || erasesize == 0 || erasesize > len || erasesize > UINT_MAX || rem) { parse_err("illegal erasesize or len\n"); + ret = -EINVAL; goto error; } diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c index af16d3485de045..6276daa296dab6 100644 --- a/drivers/mtd/inftlmount.c +++ b/drivers/mtd/inftlmount.c @@ -259,20 +259,13 @@ static int find_boot_record(struct INFTLrecord *inftl) /* Memory alloc */ inftl->PUtable = kmalloc_array(inftl->nb_blocks, sizeof(u16), GFP_KERNEL); - if (!inftl->PUtable) { - printk(KERN_WARNING "INFTL: allocation of PUtable " - "failed (%zd bytes)\n", - inftl->nb_blocks * sizeof(u16)); + if (!inftl->PUtable) return -ENOMEM; - } inftl->VUtable = kmalloc_array(inftl->nb_blocks, sizeof(u16), GFP_KERNEL); if (!inftl->VUtable) { kfree(inftl->PUtable); - printk(KERN_WARNING "INFTL: allocation of VUtable " - "failed (%zd bytes)\n", - inftl->nb_blocks * sizeof(u16)); return -ENOMEM; } @@ -330,7 +323,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address, buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL); if (!buf) - return -1; + return -ENOMEM; ret = -1; for (i = 0; i < len; i += SECTORSIZE) { @@ -558,12 +551,8 @@ int INFTL_mount(struct INFTLrecord *s) /* Temporary buffer to store ANAC numbers. 
*/ ANACtable = kcalloc(s->nb_blocks, sizeof(u8), GFP_KERNEL); - if (!ANACtable) { - printk(KERN_WARNING "INFTL: allocation of ANACtable " - "failed (%zd bytes)\n", - s->nb_blocks * sizeof(u8)); + if (!ANACtable) return -ENOMEM; - } /* * First pass is to explore each physical unit, and construct the diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c index 42a95ba40f2cb7..281fcbaa74e70c 100644 --- a/drivers/mtd/maps/amd76xrom.c +++ b/drivers/mtd/maps/amd76xrom.c @@ -189,10 +189,8 @@ static int amd76xrom_init_one(struct pci_dev *pdev, if (!map) { map = kmalloc(sizeof(*map), GFP_KERNEL); - } - if (!map) { - printk(KERN_ERR MOD_NAME ": kmalloc failed"); - goto out; + if (!map) + goto out; } memset(map, 0, sizeof(*map)); INIT_LIST_HEAD(&map->list); diff --git a/drivers/mtd/maps/ck804xrom.c b/drivers/mtd/maps/ck804xrom.c index 460494212f6a7b..c0216bc740cc3b 100644 --- a/drivers/mtd/maps/ck804xrom.c +++ b/drivers/mtd/maps/ck804xrom.c @@ -217,12 +217,10 @@ static int __init ck804xrom_init_one(struct pci_dev *pdev, unsigned long offset; int i; - if (!map) - map = kmalloc(sizeof(*map), GFP_KERNEL); - if (!map) { - printk(KERN_ERR MOD_NAME ": kmalloc failed"); - goto out; + map = kmalloc(sizeof(*map), GFP_KERNEL); + if (!map) + goto out; } memset(map, 0, sizeof(*map)); INIT_LIST_HEAD(&map->list); diff --git a/drivers/mtd/maps/esb2rom.c b/drivers/mtd/maps/esb2rom.c index 85e14150a07306..15d5b76ff50450 100644 --- a/drivers/mtd/maps/esb2rom.c +++ b/drivers/mtd/maps/esb2rom.c @@ -277,11 +277,10 @@ static int __init esb2rom_init_one(struct pci_dev *pdev, unsigned long offset; int i; - if (!map) - map = kmalloc(sizeof(*map), GFP_KERNEL); if (!map) { - printk(KERN_ERR MOD_NAME ": kmalloc failed"); - goto out; + map = kmalloc(sizeof(*map), GFP_KERNEL); + if (!map) + goto out; } memset(map, 0, sizeof(*map)); INIT_LIST_HEAD(&map->list); diff --git a/drivers/mtd/maps/ichxrom.c b/drivers/mtd/maps/ichxrom.c index fda72c5fd8f93a..c8b2793691db0d 100644 --- a/drivers/mtd/maps/ichxrom.c +++ b/drivers/mtd/maps/ichxrom.c @@ -213,10 +213,8 @@ static int __init ichxrom_init_one(struct pci_dev *pdev, if (!map) { map = kmalloc(sizeof(*map), GFP_KERNEL); - } - if (!map) { - printk(KERN_ERR MOD_NAME ": kmalloc failed"); - goto out; + if (!map) + goto out; } memset(map, 0, sizeof(*map)); INIT_LIST_HEAD(&map->list); diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c index 0bec7c791d17c6..cedd8ef9a6bf5a 100644 --- a/drivers/mtd/maps/plat-ram.c +++ b/drivers/mtd/maps/plat-ram.c @@ -127,7 +127,6 @@ static int platram_probe(struct platform_device *pdev) info->map.virt = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(info->map.virt)) { err = PTR_ERR(info->map.virt); - dev_err(&pdev->dev, "failed to ioremap() region\n"); goto exit_free; } diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c index f9cfb084c02918..6c0c91bfec058d 100644 --- a/drivers/mtd/maps/sun_uflash.c +++ b/drivers/mtd/maps/sun_uflash.c @@ -62,10 +62,8 @@ int uflash_devinit(struct platform_device *op, struct device_node *dp) } up = kzalloc(sizeof(struct uflash_dev), GFP_KERNEL); - if (!up) { - printk(KERN_ERR PFX "Cannot allocate struct uflash_dev\n"); + if (!up) return -ENOMEM; - } /* copy defaults and tweak parameters */ memcpy(&up->map, &uflash_map_templ, sizeof(uflash_map_templ)); diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 9aaeadd53eb4f7..b5ccd3037788a2 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -96,6 +96,12 @@ static void mtd_release(struct device 
*dev) device_destroy(&mtd_class, index + 1); } +#define MTD_DEVICE_ATTR_RO(name) \ +static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL) + +#define MTD_DEVICE_ATTR_RW(name) \ +static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store) + static ssize_t mtd_type_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -131,46 +137,45 @@ static ssize_t mtd_type_show(struct device *dev, type = "unknown"; } - return snprintf(buf, PAGE_SIZE, "%s\n", type); + return sysfs_emit(buf, "%s\n", type); } -static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL); +MTD_DEVICE_ATTR_RO(type); static ssize_t mtd_flags_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags); + return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags); } -static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL); +MTD_DEVICE_ATTR_RO(flags); static ssize_t mtd_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%llu\n", - (unsigned long long)mtd->size); + return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size); } -static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL); +MTD_DEVICE_ATTR_RO(size); static ssize_t mtd_erasesize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize); + return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize); } -static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL); +MTD_DEVICE_ATTR_RO(erasesize); static ssize_t mtd_writesize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize); + return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize); } -static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL); +MTD_DEVICE_ATTR_RO(writesize); static ssize_t mtd_subpagesize_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -178,55 +183,54 @@ static ssize_t mtd_subpagesize_show(struct device *dev, struct mtd_info *mtd = dev_get_drvdata(dev); unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft; - return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize); + return sysfs_emit(buf, "%u\n", subpagesize); } -static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL); +MTD_DEVICE_ATTR_RO(subpagesize); static ssize_t mtd_oobsize_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize); + return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize); } -static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL); +MTD_DEVICE_ATTR_RO(oobsize); static ssize_t mtd_oobavail_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%u\n", mtd->oobavail); + return sysfs_emit(buf, "%u\n", mtd->oobavail); } -static DEVICE_ATTR(oobavail, S_IRUGO, mtd_oobavail_show, NULL); +MTD_DEVICE_ATTR_RO(oobavail); static ssize_t mtd_numeraseregions_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions); + return 
sysfs_emit(buf, "%u\n", mtd->numeraseregions); } -static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show, - NULL); +MTD_DEVICE_ATTR_RO(numeraseregions); static ssize_t mtd_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name); + return sysfs_emit(buf, "%s\n", mtd->name); } -static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL); +MTD_DEVICE_ATTR_RO(name); static ssize_t mtd_ecc_strength_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength); + return sysfs_emit(buf, "%u\n", mtd->ecc_strength); } -static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL); +MTD_DEVICE_ATTR_RO(ecc_strength); static ssize_t mtd_bitflip_threshold_show(struct device *dev, struct device_attribute *attr, @@ -234,7 +238,7 @@ static ssize_t mtd_bitflip_threshold_show(struct device *dev, { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold); + return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold); } static ssize_t mtd_bitflip_threshold_store(struct device *dev, @@ -252,60 +256,57 @@ static ssize_t mtd_bitflip_threshold_store(struct device *dev, mtd->bitflip_threshold = bitflip_threshold; return count; } -static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR, - mtd_bitflip_threshold_show, - mtd_bitflip_threshold_store); +MTD_DEVICE_ATTR_RW(bitflip_threshold); static ssize_t mtd_ecc_step_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size); + return sysfs_emit(buf, "%u\n", mtd->ecc_step_size); } -static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL); +MTD_DEVICE_ATTR_RO(ecc_step_size); -static ssize_t mtd_ecc_stats_corrected_show(struct device *dev, +static ssize_t mtd_corrected_bits_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; - return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected); + return sysfs_emit(buf, "%u\n", ecc_stats->corrected); } -static DEVICE_ATTR(corrected_bits, S_IRUGO, - mtd_ecc_stats_corrected_show, NULL); +MTD_DEVICE_ATTR_RO(corrected_bits); /* ecc stats corrected */ -static ssize_t mtd_ecc_stats_errors_show(struct device *dev, +static ssize_t mtd_ecc_failures_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; - return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed); + return sysfs_emit(buf, "%u\n", ecc_stats->failed); } -static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL); +MTD_DEVICE_ATTR_RO(ecc_failures); /* ecc stats errors */ -static ssize_t mtd_badblocks_show(struct device *dev, +static ssize_t mtd_bad_blocks_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; - return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks); + return sysfs_emit(buf, "%u\n", ecc_stats->badblocks); } -static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL); +MTD_DEVICE_ATTR_RO(bad_blocks); -static ssize_t mtd_bbtblocks_show(struct device *dev, +static ssize_t 
mtd_bbt_blocks_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats; - return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks); + return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks); } -static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL); +MTD_DEVICE_ATTR_RO(bbt_blocks); static struct attribute *mtd_attrs[] = { &dev_attr_type.attr, @@ -361,6 +362,7 @@ static struct dentry *dfs_dir_mtd; static void mtd_debugfs_populate(struct mtd_info *mtd) { + struct mtd_info *master = mtd_get_master(mtd); struct device *dev = &mtd->dev; struct dentry *root; @@ -370,12 +372,12 @@ static void mtd_debugfs_populate(struct mtd_info *mtd) root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd); mtd->dbg.dfs_dir = root; - if (mtd->dbg.partid) - debugfs_create_file("partid", 0400, root, mtd, + if (master->dbg.partid) + debugfs_create_file("partid", 0400, root, master, &mtd_partid_debug_fops); - if (mtd->dbg.partname) - debugfs_create_file("partname", 0400, root, mtd, + if (master->dbg.partname) + debugfs_create_file("partname", 0400, root, master, &mtd_partname_debug_fops); } @@ -777,6 +779,148 @@ static void mtd_set_dev_defaults(struct mtd_info *mtd) mutex_init(&mtd->master.chrdev_lock); } +static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user) +{ + struct otp_info *info; + ssize_t size = 0; + unsigned int i; + size_t retlen; + int ret; + + info = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!info) + return -ENOMEM; + + if (is_user) + ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info); + else + ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info); + if (ret) + goto err; + + for (i = 0; i < retlen / sizeof(*info); i++) + size += info[i].length; + + kfree(info); + return size; + +err: + kfree(info); + return ret; +} + +static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd, + const char *compatible, + int size, + nvmem_reg_read_t reg_read) +{ + struct nvmem_device *nvmem = NULL; + struct nvmem_config config = {}; + struct device_node *np; + + /* DT binding is optional */ + np = of_get_compatible_child(mtd->dev.of_node, compatible); + + /* OTP nvmem will be registered on the physical device */ + config.dev = mtd->dev.parent; + /* just reuse the compatible as name */ + config.name = compatible; + config.id = NVMEM_DEVID_NONE; + config.owner = THIS_MODULE; + config.type = NVMEM_TYPE_OTP; + config.root_only = true; + config.reg_read = reg_read; + config.size = size; + config.of_node = np; + config.priv = mtd; + + nvmem = nvmem_register(&config); + /* Just ignore if there is no NVMEM support in the kernel */ + if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP) + nvmem = NULL; + + of_node_put(np); + + return nvmem; +} + +static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset, + void *val, size_t bytes) +{ + struct mtd_info *mtd = priv; + size_t retlen; + int ret; + + ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val); + if (ret) + return ret; + + return retlen == bytes ? 0 : -EIO; +} + +static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset, + void *val, size_t bytes) +{ + struct mtd_info *mtd = priv; + size_t retlen; + int ret; + + ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val); + if (ret) + return ret; + + return retlen == bytes ? 
0 : -EIO; +} + +static int mtd_otp_nvmem_add(struct mtd_info *mtd) +{ + struct nvmem_device *nvmem; + ssize_t size; + int err; + + if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) { + size = mtd_otp_size(mtd, true); + if (size < 0) + return size; + + if (size > 0) { + nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size, + mtd_nvmem_user_otp_reg_read); + if (IS_ERR(nvmem)) { + dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n"); + return PTR_ERR(nvmem); + } + mtd->otp_user_nvmem = nvmem; + } + } + + if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) { + size = mtd_otp_size(mtd, false); + if (size < 0) { + err = size; + goto err; + } + + if (size > 0) { + nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size, + mtd_nvmem_fact_otp_reg_read); + if (IS_ERR(nvmem)) { + dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n"); + err = PTR_ERR(nvmem); + goto err; + } + mtd->otp_factory_nvmem = nvmem; + } + } + + return 0; + +err: + if (mtd->otp_user_nvmem) + nvmem_unregister(mtd->otp_user_nvmem); + return err; +} + /** * mtd_device_parse_register - parse partitions and register an MTD device. * @@ -852,6 +996,8 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, register_reboot_notifier(&mtd->reboot_notifier); } + ret = mtd_otp_nvmem_add(mtd); + out: if (ret && device_is_registered(&mtd->dev)) del_mtd_device(mtd); @@ -873,6 +1019,12 @@ int mtd_device_unregister(struct mtd_info *master) if (master->_reboot) unregister_reboot_notifier(&master->reboot_notifier); + if (master->otp_user_nvmem) + nvmem_unregister(master->otp_user_nvmem); + + if (master->otp_factory_nvmem) + nvmem_unregister(master->otp_factory_nvmem); + err = del_mtd_partitions(master); if (err) return err; diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c index 862c4a8892346b..227df24387df5e 100644 --- a/drivers/mtd/mtdoops.c +++ b/drivers/mtd/mtdoops.c @@ -401,10 +401,8 @@ static int __init mtdoops_init(void) cxt->mtd_index = mtd_index; cxt->oops_buf = vmalloc(record_size); - if (!cxt->oops_buf) { - printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n"); + if (!cxt->oops_buf) return -ENOMEM; - } memset(cxt->oops_buf, 0xff, record_size); cxt->oops_buf_busy = 0; diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 665fd9020b764d..04af12b66110c6 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -212,15 +212,14 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, return child; } -static ssize_t mtd_partition_offset_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t offset_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct mtd_info *mtd = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE, "%lld\n", mtd->part.offset); + return sysfs_emit(buf, "%lld\n", mtd->part.offset); } - -static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL); +static DEVICE_ATTR_RO(offset); /* mtd partition offset */ static const struct attribute *mtd_partition_attrs[] = { &dev_attr_offset.attr, diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c index 044adf91385465..64af6898131d65 100644 --- a/drivers/mtd/nand/bbt.c +++ b/drivers/mtd/nand/bbt.c @@ -123,7 +123,7 @@ int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry, unsigned int rbits = bits_per_block + offs - BITS_PER_LONG; pos[1] &= ~GENMASK(rbits - 1, 0); - pos[1] |= val >> rbits; + pos[1] |= val >> (bits_per_block - rbits); } return 0; diff --git a/drivers/mtd/nand/raw/Kconfig 
b/drivers/mtd/nand/raw/Kconfig index 30f061939560f1..630728de4b7c9b 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -453,6 +453,14 @@ config MTD_NAND_ROCKCHIP NFC v800: RK3308, RV1108 NFC v900: PX30, RK3326 +config MTD_NAND_PL35X + tristate "ARM PL35X NAND controller" + depends on OF || COMPILE_TEST + depends on PL353_SMC + help + Enables support for PrimeCell SMC PL351 and PL353 NAND + controller found on Zynq7000. + comment "Misc" config MTD_SM_COMMON diff --git a/drivers/mtd/nand/raw/Makefile b/drivers/mtd/nand/raw/Makefile index d011c6c53f8f2f..2f97958c3a3397 100644 --- a/drivers/mtd/nand/raw/Makefile +++ b/drivers/mtd/nand/raw/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_MTD_NAND_CADENCE) += cadence-nand-controller.o obj-$(CONFIG_MTD_NAND_ARASAN) += arasan-nand-controller.o obj-$(CONFIG_MTD_NAND_INTEL_LGM) += intel-nand-controller.o obj-$(CONFIG_MTD_NAND_ROCKCHIP) += rockchip-nand-controller.o +obj-$(CONFIG_MTD_NAND_PL35X) += pl35x-nand-controller.o nand-objs := nand_base.o nand_legacy.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_onfi.o diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index 549aac00228ebf..9cbcc698c64d8a 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -53,6 +54,7 @@ #define PROG_RST BIT(8) #define PROG_GET_FEATURE BIT(9) #define PROG_SET_FEATURE BIT(10) +#define PROG_CHG_RD_COL_ENH BIT(14) #define INTR_STS_EN_REG 0x14 #define INTR_SIG_EN_REG 0x18 @@ -70,6 +72,15 @@ #define FLASH_STS_REG 0x28 +#define TIMING_REG 0x2C +#define TCCS_TIME_500NS 0 +#define TCCS_TIME_300NS 3 +#define TCCS_TIME_200NS 2 +#define TCCS_TIME_100NS 1 +#define FAST_TCAD BIT(2) +#define DQS_BUFF_SEL_IN(x) FIELD_PREP(GENMASK(6, 3), (x)) +#define DQS_BUFF_SEL_OUT(x) FIELD_PREP(GENMASK(18, 15), (x)) + #define DATA_PORT_REG 0x30 #define ECC_CONF_REG 0x34 @@ -91,7 +102,7 @@ #define DATA_INTERFACE_REG 0x6C #define DIFACE_SDR_MODE(x) FIELD_PREP(GENMASK(2, 0), (x)) -#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (X)) +#define DIFACE_DDR_MODE(x) FIELD_PREP(GENMASK(5, 3), (x)) #define DIFACE_SDR 0 #define DIFACE_NVDDR BIT(9) @@ -107,6 +118,8 @@ #define ANFC_XLNX_SDR_DFLT_CORE_CLK 100000000 #define ANFC_XLNX_SDR_HS_CORE_CLK 80000000 +static struct gpio_desc *anfc_default_cs_array[2] = {NULL, NULL}; + /** * struct anfc_op - Defines how to execute an operation * @pkt_reg: Packet register @@ -137,11 +150,11 @@ struct anfc_op { * struct anand - Defines the NAND chip related information * @node: Used to store NAND chips into a list * @chip: NAND chip information structure - * @cs: Chip select line * @rb: Ready-busy line * @page_sz: Register value of the page_sz field to use * @clk: Expected clock frequency to use - * @timings: Data interface timing mode to use + * @data_iface: Data interface timing mode to use + * @timings: NV-DDR specific timings to use * @ecc_conf: Hardware ECC configuration value * @strength: Register value of the ECC strength * @raddr_cycles: Row address cycle information @@ -151,14 +164,17 @@ struct anfc_op { * @errloc: Array of errors located with soft BCH * @hw_ecc: Buffer to store syndromes computed by hardware * @bch: BCH structure + * @cs_idx: Array of chip-select for this device, values are indexes + * of the controller structure @gpio_cs array + * @ncs_idx: Size of the @cs_idx array */ struct anand { struct list_head node; struct nand_chip 
chip; - unsigned int cs; unsigned int rb; unsigned int page_sz; unsigned long clk; + u32 data_iface; u32 timings; u32 ecc_conf; u32 strength; @@ -169,6 +185,8 @@ struct anand { unsigned int *errloc; u8 *hw_ecc; struct bch_control *bch; + int *cs_idx; + int ncs_idx; }; /** @@ -179,8 +197,14 @@ struct anand { * @bus_clk: Pointer to the flash clock * @controller: Base controller structure * @chips: List of all NAND chips attached to the controller - * @assigned_cs: Bitmask describing already assigned CS lines * @cur_clk: Current clock rate + * @cs_array: CS array. Native CS are left empty, the other cells are + * populated with their corresponding GPIO descriptor. + * @ncs: Size of @cs_array + * @cur_cs: Index in @cs_array of the currently in use CS + * @native_cs: Currently selected native CS + * @spare_cs: Native CS that is not wired (may be selected when a GPIO + * CS is in use) */ struct arasan_nfc { struct device *dev; @@ -189,8 +213,12 @@ struct arasan_nfc { struct clk *bus_clk; struct nand_controller controller; struct list_head chips; - unsigned long assigned_cs; unsigned int cur_clk; + struct gpio_desc **cs_array; + unsigned int ncs; + int cur_cs; + unsigned int native_cs; + unsigned int spare_cs; }; static struct anand *to_anand(struct nand_chip *nand) @@ -273,6 +301,72 @@ static int anfc_pkt_len_config(unsigned int len, unsigned int *steps, return 0; } +static bool anfc_is_gpio_cs(struct arasan_nfc *nfc, int nfc_cs) +{ + return nfc_cs >= 0 && nfc->cs_array[nfc_cs]; +} + +static int anfc_relative_to_absolute_cs(struct anand *anand, int num) +{ + return anand->cs_idx[num]; +} + +static void anfc_assert_cs(struct arasan_nfc *nfc, unsigned int nfc_cs_idx) +{ + /* CS did not change: do nothing */ + if (nfc->cur_cs == nfc_cs_idx) + return; + + /* Deassert the previous CS if it was a GPIO */ + if (anfc_is_gpio_cs(nfc, nfc->cur_cs)) + gpiod_set_value_cansleep(nfc->cs_array[nfc->cur_cs], 1); + + /* Assert the new one */ + if (anfc_is_gpio_cs(nfc, nfc_cs_idx)) { + nfc->native_cs = nfc->spare_cs; + gpiod_set_value_cansleep(nfc->cs_array[nfc_cs_idx], 0); + } else { + nfc->native_cs = nfc_cs_idx; + } + + nfc->cur_cs = nfc_cs_idx; +} + +static int anfc_select_target(struct nand_chip *chip, int target) +{ + struct anand *anand = to_anand(chip); + struct arasan_nfc *nfc = to_anfc(chip->controller); + unsigned int nfc_cs_idx = anfc_relative_to_absolute_cs(anand, target); + int ret; + + anfc_assert_cs(nfc, nfc_cs_idx); + + /* Update the controller timings and the potential ECC configuration */ + writel_relaxed(anand->data_iface, nfc->base + DATA_INTERFACE_REG); + writel_relaxed(anand->timings, nfc->base + TIMING_REG); + + /* Update clock frequency */ + if (nfc->cur_clk != anand->clk) { + clk_disable_unprepare(nfc->controller_clk); + ret = clk_set_rate(nfc->controller_clk, anand->clk); + if (ret) { + dev_err(nfc->dev, "Failed to change clock rate\n"); + return ret; + } + + ret = clk_prepare_enable(nfc->controller_clk); + if (ret) { + dev_err(nfc->dev, + "Failed to re-enable the controller clock\n"); + return ret; + } + + nfc->cur_clk = anand->clk; + } + + return 0; +} + /* * When using the embedded hardware ECC engine, the controller is in charge of * feeding the engine with, first, the ECC residue present in the data array. 
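The hunk above replaces the Arasan driver's single native chip-select with an array that may mix native and GPIO chip-selects, asserting a "spare" native CS whenever a GPIO CS is driven. The following is a minimal, self-contained sketch of that assert/deassert pattern, not part of the patch itself; it assumes a simplified, hypothetical my_nfc structure, whereas the real driver keeps these fields (cs_array, cur_cs, native_cs, spare_cs) in struct arasan_nfc as shown above.

/*
 * Illustrative sketch only. CS GPIOs are requested as GPIOD_OUT_HIGH
 * (deasserted) and driven low to assert, matching the active-low wiring
 * the driver above assumes.
 */
#include <linux/gpio/consumer.h>

struct my_nfc {
	struct gpio_desc **cs_array;	/* NULL entry => native CS */
	unsigned int ncs;		/* number of entries in cs_array */
	int cur_cs;			/* currently asserted index, -1 if none */
	unsigned int native_cs;		/* native CS to program in the controller */
	unsigned int spare_cs;		/* unwired native CS used while a GPIO CS is active */
};

static bool my_is_gpio_cs(struct my_nfc *nfc, int cs)
{
	return cs >= 0 && nfc->cs_array[cs];
}

static void my_assert_cs(struct my_nfc *nfc, unsigned int cs)
{
	/* Nothing to do if the requested CS is already asserted */
	if (nfc->cur_cs == cs)
		return;

	/* Deassert the previously selected CS if it was a GPIO (active-low) */
	if (my_is_gpio_cs(nfc, nfc->cur_cs))
		gpiod_set_value_cansleep(nfc->cs_array[nfc->cur_cs], 1);

	if (my_is_gpio_cs(nfc, cs)) {
		/* The controller must still drive a native CS: pick the spare one */
		nfc->native_cs = nfc->spare_cs;
		gpiod_set_value_cansleep(nfc->cs_array[cs], 0);
	} else {
		nfc->native_cs = cs;
	}

	nfc->cur_cs = cs;
}

A caller such as a ->select_target() hook would invoke my_assert_cs() first, then program the per-target timings and clock, which is exactly the ordering anfc_select_target() follows in the hunk above.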
@@ -315,7 +409,7 @@ static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf, .addr2_reg = ((page >> 16) & 0xFF) | ADDR2_STRENGTH(anand->strength) | - ADDR2_CS(anand->cs), + ADDR2_CS(nfc->native_cs), .cmd_reg = CMD_1(NAND_CMD_READ0) | CMD_2(NAND_CMD_READSTART) | @@ -401,6 +495,18 @@ static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf, return 0; } +static int anfc_sel_read_page_hw_ecc(struct nand_chip *chip, u8 *buf, + int oob_required, int page) +{ + int ret; + + ret = anfc_select_target(chip, chip->cur_cs); + if (ret) + return ret; + + return anfc_read_page_hw_ecc(chip, buf, oob_required, page); +}; + static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, int oob_required, int page) { @@ -420,7 +526,7 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, .addr2_reg = ((page >> 16) & 0xFF) | ADDR2_STRENGTH(anand->strength) | - ADDR2_CS(anand->cs), + ADDR2_CS(nfc->native_cs), .cmd_reg = CMD_1(NAND_CMD_SEQIN) | CMD_2(NAND_CMD_PAGEPROG) | @@ -461,11 +567,24 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, return ret; } +static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf, + int oob_required, int page) +{ + int ret; + + ret = anfc_select_target(chip, chip->cur_cs); + if (ret) + return ret; + + return anfc_write_page_hw_ecc(chip, buf, oob_required, page); +}; + /* NAND framework ->exec_op() hooks and related helpers */ static int anfc_parse_instructions(struct nand_chip *chip, const struct nand_subop *subop, struct anfc_op *nfc_op) { + struct arasan_nfc *nfc = to_anfc(chip->controller); struct anand *anand = to_anand(chip); const struct nand_op_instr *instr = NULL; bool first_cmd = true; @@ -473,7 +592,7 @@ static int anfc_parse_instructions(struct nand_chip *chip, int ret, i; memset(nfc_op, 0, sizeof(*nfc_op)); - nfc_op->addr2_reg = ADDR2_CS(anand->cs); + nfc_op->addr2_reg = ADDR2_CS(nfc->native_cs); nfc_op->cmd_reg = CMD_PAGE_SIZE(anand->page_sz); for (op_id = 0; op_id < subop->ninstrs; op_id++) { @@ -622,7 +741,23 @@ static int anfc_param_read_type_exec(struct nand_chip *chip, static int anfc_data_read_type_exec(struct nand_chip *chip, const struct nand_subop *subop) { - return anfc_misc_data_type_exec(chip, subop, PROG_PGRD); + u32 prog_reg = PROG_PGRD; + + /* + * Experience shows that while in SDR mode sending a CHANGE READ COLUMN + * command through the READ PAGE "type" always works fine, when in + * NV-DDR mode the same command simply fails. However, it was also + * spotted that any CHANGE READ COLUMN command sent through the CHANGE + * READ COLUMN ENHANCED "type" would correctly work in both cases (SDR + * and NV-DDR). So, for simplicity, let's program the controller with + * the CHANGE READ COLUMN ENHANCED "type" whenever we are requested to + * perform a CHANGE READ COLUMN operation. 
+ */ + if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_RNDOUT && + subop->instrs[2].ctx.cmd.opcode == NAND_CMD_RNDOUTSTART) + prog_reg = PROG_CHG_RD_COL_ENH; + + return anfc_misc_data_type_exec(chip, subop, prog_reg); } static int anfc_param_write_type_exec(struct nand_chip *chip, @@ -753,37 +888,6 @@ static const struct nand_op_parser anfc_op_parser = NAND_OP_PARSER( NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)), ); -static int anfc_select_target(struct nand_chip *chip, int target) -{ - struct anand *anand = to_anand(chip); - struct arasan_nfc *nfc = to_anfc(chip->controller); - int ret; - - /* Update the controller timings and the potential ECC configuration */ - writel_relaxed(anand->timings, nfc->base + DATA_INTERFACE_REG); - - /* Update clock frequency */ - if (nfc->cur_clk != anand->clk) { - clk_disable_unprepare(nfc->controller_clk); - ret = clk_set_rate(nfc->controller_clk, anand->clk); - if (ret) { - dev_err(nfc->dev, "Failed to change clock rate\n"); - return ret; - } - - ret = clk_prepare_enable(nfc->controller_clk); - if (ret) { - dev_err(nfc->dev, - "Failed to re-enable the controller clock\n"); - return ret; - } - - nfc->cur_clk = anand->clk; - } - - return 0; -} - static int anfc_check_op(struct nand_chip *chip, const struct nand_operation *op) { @@ -861,21 +965,79 @@ static int anfc_setup_interface(struct nand_chip *chip, int target, struct anand *anand = to_anand(chip); struct arasan_nfc *nfc = to_anfc(chip->controller); struct device_node *np = nfc->dev->of_node; + const struct nand_sdr_timings *sdr; + const struct nand_nvddr_timings *nvddr; + unsigned int tccs_min, dqs_mode, fast_tcad; + + if (nand_interface_is_nvddr(conf)) { + nvddr = nand_get_nvddr_timings(conf); + if (IS_ERR(nvddr)) + return PTR_ERR(nvddr); + } else { + sdr = nand_get_sdr_timings(conf); + if (IS_ERR(sdr)) + return PTR_ERR(sdr); + } if (target < 0) return 0; - anand->timings = DIFACE_SDR | DIFACE_SDR_MODE(conf->timings.mode); + if (nand_interface_is_sdr(conf)) { + anand->data_iface = DIFACE_SDR | + DIFACE_SDR_MODE(conf->timings.mode); + anand->timings = 0; + } else { + anand->data_iface = DIFACE_NVDDR | + DIFACE_DDR_MODE(conf->timings.mode); + + if (conf->timings.nvddr.tCCS_min <= 100000) + tccs_min = TCCS_TIME_100NS; + else if (conf->timings.nvddr.tCCS_min <= 200000) + tccs_min = TCCS_TIME_200NS; + else if (conf->timings.nvddr.tCCS_min <= 300000) + tccs_min = TCCS_TIME_300NS; + else + tccs_min = TCCS_TIME_500NS; + + fast_tcad = 0; + if (conf->timings.nvddr.tCAD_min < 45000) + fast_tcad = FAST_TCAD; + + switch (conf->timings.mode) { + case 5: + case 4: + dqs_mode = 2; + break; + case 3: + dqs_mode = 3; + break; + case 2: + dqs_mode = 4; + break; + case 1: + dqs_mode = 5; + break; + case 0: + default: + dqs_mode = 6; + break; + } + + anand->timings = tccs_min | fast_tcad | + DQS_BUFF_SEL_IN(dqs_mode) | + DQS_BUFF_SEL_OUT(dqs_mode); + } + anand->clk = ANFC_XLNX_SDR_DFLT_CORE_CLK; /* * Due to a hardware bug in the ZynqMP SoC, SDR timing modes 0-1 work * with f > 90MHz (default clock is 100MHz) but signals are unstable * with higher modes. Hence we decrease a little bit the clock rate to - * 80MHz when using modes 2-5 with this SoC. + * 80MHz when using SDR modes 2-5 with this SoC. 
*/ if (of_device_is_compatible(np, "xlnx,zynqmp-nand-controller") && - conf->timings.mode >= 2) + nand_interface_is_sdr(conf) && conf->timings.mode >= 2) anand->clk = ANFC_XLNX_SDR_HS_CORE_CLK; return 0; @@ -1007,8 +1169,8 @@ static int anfc_init_hw_ecc_controller(struct arasan_nfc *nfc, if (!anand->bch) return -EINVAL; - ecc->read_page = anfc_read_page_hw_ecc; - ecc->write_page = anfc_write_page_hw_ecc; + ecc->read_page = anfc_sel_read_page_hw_ecc; + ecc->write_page = anfc_sel_write_page_hw_ecc; return 0; } @@ -1094,37 +1256,43 @@ static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np) struct anand *anand; struct nand_chip *chip; struct mtd_info *mtd; - int cs, rb, ret; + int rb, ret, i; anand = devm_kzalloc(nfc->dev, sizeof(*anand), GFP_KERNEL); if (!anand) return -ENOMEM; - /* We do not support multiple CS per chip yet */ - if (of_property_count_elems_of_size(np, "reg", sizeof(u32)) != 1) { + /* Chip-select init */ + anand->ncs_idx = of_property_count_elems_of_size(np, "reg", sizeof(u32)); + if (anand->ncs_idx <= 0 || anand->ncs_idx > nfc->ncs) { dev_err(nfc->dev, "Invalid reg property\n"); return -EINVAL; } - ret = of_property_read_u32(np, "reg", &cs); - if (ret) - return ret; + anand->cs_idx = devm_kcalloc(nfc->dev, anand->ncs_idx, + sizeof(*anand->cs_idx), GFP_KERNEL); + if (!anand->cs_idx) + return -ENOMEM; + for (i = 0; i < anand->ncs_idx; i++) { + ret = of_property_read_u32_index(np, "reg", i, + &anand->cs_idx[i]); + if (ret) { + dev_err(nfc->dev, "invalid CS property: %d\n", ret); + return ret; + } + } + + /* Ready-busy init */ ret = of_property_read_u32(np, "nand-rb", &rb); if (ret) return ret; - if (cs >= ANFC_MAX_CS || rb >= ANFC_MAX_CS) { - dev_err(nfc->dev, "Wrong CS %d or RB %d\n", cs, rb); - return -EINVAL; - } - - if (test_and_set_bit(cs, &nfc->assigned_cs)) { - dev_err(nfc->dev, "Already assigned CS %d\n", cs); + if (rb >= ANFC_MAX_CS) { + dev_err(nfc->dev, "Wrong RB %d\n", rb); return -EINVAL; } - anand->cs = cs; anand->rb = rb; chip = &anand->chip; @@ -1140,7 +1308,7 @@ static int anfc_chip_init(struct arasan_nfc *nfc, struct device_node *np) return -EINVAL; } - ret = nand_scan(chip, 1); + ret = nand_scan(chip, anand->ncs_idx); if (ret) { dev_err(nfc->dev, "Scan operation failed\n"); return ret; @@ -1178,7 +1346,7 @@ static int anfc_chips_init(struct arasan_nfc *nfc) int nchips = of_get_child_count(np); int ret; - if (!nchips || nchips > ANFC_MAX_CS) { + if (!nchips) { dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n", nchips); return -EINVAL; @@ -1203,6 +1371,47 @@ static void anfc_reset(struct arasan_nfc *nfc) /* Enable interrupt status */ writel_relaxed(EVENT_MASK, nfc->base + INTR_STS_EN_REG); + + nfc->cur_cs = -1; +} + +static int anfc_parse_cs(struct arasan_nfc *nfc) +{ + int ret; + + /* Check the gpio-cs property */ + ret = rawnand_dt_parse_gpio_cs(nfc->dev, &nfc->cs_array, &nfc->ncs); + if (ret) + return ret; + + /* + * The controller native CS cannot be both disabled at the same time. + * Hence, only one native CS can be used if GPIO CS are needed, so that + * the other is selected when a non-native CS must be asserted (not + * wired physically or configured as GPIO instead of NAND CS). In this + * case, the "not" chosen CS is assigned to nfc->spare_cs and selected + * whenever a GPIO CS must be asserted. 
+ */ + if (nfc->cs_array && nfc->ncs > 2) { + if (!nfc->cs_array[0] && !nfc->cs_array[1]) { + dev_err(nfc->dev, + "Assign a single native CS when using GPIOs\n"); + return -EINVAL; + } + + if (nfc->cs_array[0]) + nfc->spare_cs = 0; + else + nfc->spare_cs = 1; + } + + if (!nfc->cs_array) { + nfc->cs_array = anfc_default_cs_array; + nfc->ncs = ANFC_MAX_CS; + return 0; + } + + return 0; } static int anfc_probe(struct platform_device *pdev) @@ -1241,6 +1450,14 @@ static int anfc_probe(struct platform_device *pdev) if (ret) goto disable_controller_clk; + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) + goto disable_bus_clk; + + ret = anfc_parse_cs(nfc); + if (ret) + goto disable_bus_clk; + ret = anfc_chips_init(nfc); if (ret) goto disable_bus_clk; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 8aab1017b46003..f3276ee9e4fe73 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -1246,7 +1246,7 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, nc = to_nand_controller(nand->base.controller); /* DDR interface not supported. */ - if (conf->type != NAND_SDR_IFACE) + if (!nand_interface_is_sdr(conf)) return -ENOTSUPP; /* @@ -1524,8 +1524,13 @@ static int atmel_nand_setup_interface(struct nand_chip *chip, int csline, const struct nand_interface_config *conf) { struct atmel_nand *nand = to_atmel_nand(chip); + const struct nand_sdr_timings *sdr; struct atmel_nand_controller *nc; + sdr = nand_get_sdr_timings(conf); + if (IS_ERR(sdr)) + return PTR_ERR(sdr); + nc = to_nand_controller(nand->base.controller); if (csline >= nand->numcs || @@ -1629,10 +1634,8 @@ static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc, } nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL); - if (!nand) { - dev_err(nc->dev, "Failed to allocate NAND object\n"); + if (!nand) return ERR_PTR(-ENOMEM); - } nand->numcs = numcs; diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index b46786cd53e0bc..7eec60ea905647 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -2348,9 +2348,9 @@ cadence_nand_setup_interface(struct nand_chip *chip, int chipnr, * for tRP and tRH timings. If it is NOT possible to sample data * with optimal tRP/tRH settings, the parameters will be extended. * If clk_period is 50ns (the lowest value) this condition is met - * for asynchronous timing modes 1, 2, 3, 4 and 5. - * If clk_period is 20ns the condition is met only - * for asynchronous timing mode 5. + * for SDR timing modes 1, 2, 3, 4 and 5. + * If clk_period is 20ns the condition is met only for SDR timing + * mode 5. 
*/ if (sdr->tRC_min <= clk_period && sdr->tRP_min <= (clk_period / 2) && diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h index fdc5ed7de083fc..5e1c3ddae5f83a 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.h @@ -79,7 +79,7 @@ enum gpmi_type { struct gpmi_devdata { enum gpmi_type type; int bch_max_ecc_strength; - int max_chain_delay; /* See the async EDO mode */ + int max_chain_delay; /* See the SDR EDO mode */ const char * const *clks; const int clks_count; }; diff --git a/drivers/mtd/nand/raw/hisi504_nand.c b/drivers/mtd/nand/raw/hisi504_nand.c index 8b2122ce6ec359..78c4e05434e2bd 100644 --- a/drivers/mtd/nand/raw/hisi504_nand.c +++ b/drivers/mtd/nand/raw/hisi504_nand.c @@ -761,10 +761,8 @@ static int hisi_nfc_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); host->mmio = devm_ioremap_resource(dev, res); - if (IS_ERR(host->mmio)) { - dev_err(dev, "devm_ioremap_resource[1] fail\n"); + if (IS_ERR(host->mmio)) return PTR_ERR(host->mmio); - } mtd->name = "hisi_nand"; mtd->dev.parent = &pdev->dev; diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index 012876e14317a9..7016e0f38398e2 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -90,9 +90,14 @@ void onfi_fill_interface_config(struct nand_chip *chip, unsigned int timing_mode); unsigned int onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings); +unsigned int +onfi_find_closest_nvddr_mode(const struct nand_nvddr_timings *spec_timings); int nand_choose_best_sdr_timings(struct nand_chip *chip, struct nand_interface_config *iface, struct nand_sdr_timings *spec_timings); +int nand_choose_best_nvddr_timings(struct nand_chip *chip, + struct nand_interface_config *iface, + struct nand_nvddr_timings *spec_timings); const struct nand_interface_config *nand_get_reset_interface_config(void); int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param); diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 79da6b02e20958..2455a581fd70cf 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -451,7 +451,7 @@ struct marvell_nfc_timings { }; /** - * Derives a duration in numbers of clock cycles. + * TO_CYCLES() - Derives a duration in numbers of clock cycles. 
* * @ps: Duration in pico-seconds * @period_ns: Clock period in nano-seconds @@ -3030,8 +3030,10 @@ static int __maybe_unused marvell_nfc_resume(struct device *dev) return ret; ret = clk_prepare_enable(nfc->reg_clk); - if (ret < 0) + if (ret < 0) { + clk_disable_unprepare(nfc->core_clk); return ret; + } /* * Reset nfc->selected_chip so the next command will cause the timing diff --git a/drivers/mtd/nand/raw/mtk_ecc.c b/drivers/mtd/nand/raw/mtk_ecc.c index 75f1fa3d4d35ca..c437d97debb8a7 100644 --- a/drivers/mtd/nand/raw/mtk_ecc.c +++ b/drivers/mtd/nand/raw/mtk_ecc.c @@ -515,10 +515,8 @@ static int mtk_ecc_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ecc->regs = devm_ioremap_resource(dev, res); - if (IS_ERR(ecc->regs)) { - dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs)); + if (IS_ERR(ecc->regs)) return PTR_ERR(ecc->regs); - } ecc->clk = devm_clk_get(dev, NULL); if (IS_ERR(ecc->clk)) { diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index fb072c44449501..57a583149cc0cb 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include "internals.h" @@ -647,7 +648,7 @@ static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt) */ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) { - const struct nand_sdr_timings *timings; + const struct nand_interface_config *conf; u8 status = 0; int ret; @@ -655,8 +656,8 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) return -ENOTSUPP; /* Wait tWB before polling the STATUS reg. */ - timings = nand_get_sdr_timings(nand_get_interface_config(chip)); - ndelay(PSEC_TO_NSEC(timings->tWB_max)); + conf = nand_get_interface_config(chip); + ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max)); ret = nand_status_op(chip, NULL); if (ret) @@ -832,7 +833,7 @@ static int nand_reset_interface(struct nand_chip *chip, int chipnr) static int nand_setup_interface(struct nand_chip *chip, int chipnr) { const struct nand_controller_ops *ops = chip->controller->ops; - u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }; + u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request; int ret; if (!nand_controller_can_setup_interface(chip)) @@ -848,7 +849,12 @@ static int nand_setup_interface(struct nand_chip *chip, int chipnr) if (!chip->best_interface_config) return 0; - tmode_param[0] = chip->best_interface_config->timings.mode; + request = chip->best_interface_config->timings.mode; + if (nand_interface_is_sdr(chip->best_interface_config)) + request |= ONFI_DATA_INTERFACE_SDR; + else + request |= ONFI_DATA_INTERFACE_NVDDR; + tmode_param[0] = request; /* Change the mode on the chip side (if supported by the NAND chip) */ if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) { @@ -877,9 +883,13 @@ static int nand_setup_interface(struct nand_chip *chip, int chipnr) if (ret) goto err_reset_chip; - if (tmode_param[0] != chip->best_interface_config->timings.mode) { - pr_warn("timing mode %d not acknowledged by the NAND chip\n", + if (request != tmode_param[0]) { + pr_warn("%s timing mode %d not acknowledged by the NAND chip\n", + nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR", chip->best_interface_config->timings.mode); + pr_debug("NAND chip would work in %s timing mode %d\n", + tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? 
"NV-DDR" : "SDR", + (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0])); goto err_reset_chip; } @@ -935,7 +945,7 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, /* Fallback to slower modes */ best_mode = iface->timings.mode; } else if (chip->parameters.onfi) { - best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1; + best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1; } for (mode = best_mode; mode >= 0; mode--) { @@ -943,13 +953,87 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip, ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, iface); - if (!ret) + if (!ret) { + chip->best_interface_config = iface; break; + } } - chip->best_interface_config = iface; + return ret; +} - return 0; +/** + * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the + * NAND controller and the NAND chip support + * @chip: the NAND chip + * @iface: the interface configuration (can eventually be updated) + * @spec_timings: specific timings, when not fitting the ONFI specification + * + * If specific timings are provided, use them. Otherwise, retrieve supported + * timing modes from ONFI information. + */ +int nand_choose_best_nvddr_timings(struct nand_chip *chip, + struct nand_interface_config *iface, + struct nand_nvddr_timings *spec_timings) +{ + const struct nand_controller_ops *ops = chip->controller->ops; + int best_mode = 0, mode, ret; + + iface->type = NAND_NVDDR_IFACE; + + if (spec_timings) { + iface->timings.nvddr = *spec_timings; + iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings); + + /* Verify the controller supports the requested interface */ + ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, + iface); + if (!ret) { + chip->best_interface_config = iface; + return ret; + } + + /* Fallback to slower modes */ + best_mode = iface->timings.mode; + } else if (chip->parameters.onfi) { + best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1; + } + + for (mode = best_mode; mode >= 0; mode--) { + onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode); + + ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, + iface); + if (!ret) { + chip->best_interface_config = iface; + break; + } + } + + return ret; +} + +/** + * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both + * NAND controller and the NAND chip support + * @chip: the NAND chip + * @iface: the interface configuration (can eventually be updated) + * + * If specific timings are provided, use them. Otherwise, retrieve supported + * timing modes from ONFI information. 
+ */ +static int nand_choose_best_timings(struct nand_chip *chip, + struct nand_interface_config *iface) +{ + int ret; + + /* Try the fastest timings: NV-DDR */ + ret = nand_choose_best_nvddr_timings(chip, iface, NULL); + if (!ret) + return 0; + + /* Fallback to SDR timings otherwise */ + return nand_choose_best_sdr_timings(chip, iface, NULL); } /** @@ -980,7 +1064,7 @@ static int nand_choose_interface_config(struct nand_chip *chip) if (chip->ops.choose_interface_config) ret = chip->ops.choose_interface_config(chip, iface); else - ret = nand_choose_best_sdr_timings(chip, iface, NULL); + ret = nand_choose_best_timings(chip, iface); if (ret) kfree(iface); @@ -1046,15 +1130,15 @@ static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, void *buf, unsigned int len) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); struct mtd_info *mtd = nand_to_mtd(chip); u8 addrs[4]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READ0, 0), - NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)), - NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max), - PSEC_TO_NSEC(sdr->tRR_min)), + NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), + NAND_COMMON_TIMING_NS(conf, tRR_min)), NAND_OP_DATA_IN(len, buf, 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); @@ -1089,15 +1173,15 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, void *buf, unsigned int len) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); u8 addrs[5]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READ0, 0), NAND_OP_ADDR(4, addrs, 0), - NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)), - NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max), - PSEC_TO_NSEC(sdr->tRR_min)), + NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), + NAND_COMMON_TIMING_NS(conf, tRR_min)), NAND_OP_DATA_IN(len, buf, 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); @@ -1186,13 +1270,14 @@ int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf, return -EINVAL; if (nand_has_exec_op(chip)) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_PARAM, 0), - NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)), - NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max), - PSEC_TO_NSEC(sdr->tRR_min)), + NAND_OP_ADDR(1, &page, + NAND_COMMON_TIMING_NS(conf, tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), + NAND_COMMON_TIMING_NS(conf, tRR_min)), NAND_OP_8BIT_DATA_IN(len, buf, 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); @@ -1241,14 +1326,14 @@ int nand_change_read_column_op(struct nand_chip *chip, return -ENOTSUPP; if (nand_has_exec_op(chip)) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); u8 addrs[2] = {}; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RNDOUT, 0), NAND_OP_ADDR(2, 
addrs, 0), NAND_OP_CMD(NAND_CMD_RNDOUTSTART, - PSEC_TO_NSEC(sdr->tCCS_min)), + NAND_COMMON_TIMING_NS(conf, tCCS_min)), NAND_OP_DATA_IN(len, buf, 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); @@ -1316,8 +1401,8 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, const void *buf, unsigned int len, bool prog) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); struct mtd_info *mtd = nand_to_mtd(chip); u8 addrs[5] = {}; struct nand_op_instr instrs[] = { @@ -1328,10 +1413,11 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page, */ NAND_OP_CMD(NAND_CMD_READ0, 0), NAND_OP_CMD(NAND_CMD_SEQIN, 0), - NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)), + NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)), NAND_OP_DATA_OUT(len, buf, 0), - NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)), - NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0), + NAND_OP_CMD(NAND_CMD_PAGEPROG, + NAND_COMMON_TIMING_NS(conf, tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page); @@ -1430,12 +1516,13 @@ int nand_prog_page_end_op(struct nand_chip *chip) u8 status; if (nand_has_exec_op(chip)) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_PAGEPROG, - PSEC_TO_NSEC(sdr->tWB_max)), - NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0), + NAND_COMMON_TIMING_NS(conf, tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), + 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); @@ -1548,12 +1635,12 @@ int nand_change_write_column_op(struct nand_chip *chip, return -ENOTSUPP; if (nand_has_exec_op(chip)) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); u8 addrs[2]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RNDIN, 0), - NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)), + NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)), NAND_OP_DATA_OUT(len, buf, 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); @@ -1597,26 +1684,46 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, unsigned int len) { unsigned int i; - u8 *id = buf; + u8 *id = buf, *ddrbuf = NULL; if (len && !buf) return -EINVAL; if (nand_has_exec_op(chip)) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READID, 0), - NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)), + NAND_OP_ADDR(1, &addr, + NAND_COMMON_TIMING_NS(conf, tADL_min)), NAND_OP_8BIT_DATA_IN(len, buf, 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); + int ret; + + /* READ_ID data bytes are received twice in NV-DDR mode */ + if (len && nand_interface_is_nvddr(conf)) { + ddrbuf = kzalloc(len * 2, GFP_KERNEL); + if (!ddrbuf) + return -ENOMEM; + + instrs[2].ctx.data.len *= 2; + instrs[2].ctx.data.buf.in = ddrbuf; + 
} /* Drop the DATA_IN instruction if len is set to 0. */ if (!len) op.ninstrs--; - return nand_exec_op(chip, &op); + ret = nand_exec_op(chip, &op); + if (!ret && len && nand_interface_is_nvddr(conf)) { + for (i = 0; i < len; i++) + id[i] = ddrbuf[i * 2]; + } + + kfree(ddrbuf); + + return ret; } chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1); @@ -1642,19 +1749,31 @@ EXPORT_SYMBOL_GPL(nand_readid_op); int nand_status_op(struct nand_chip *chip, u8 *status) { if (nand_has_exec_op(chip)) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); + u8 ddrstatus[2]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_STATUS, - PSEC_TO_NSEC(sdr->tADL_min)), + NAND_COMMON_TIMING_NS(conf, tADL_min)), NAND_OP_8BIT_DATA_IN(1, status, 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); + int ret; + + /* The status data byte will be received twice in NV-DDR mode */ + if (status && nand_interface_is_nvddr(conf)) { + instrs[1].ctx.data.len *= 2; + instrs[1].ctx.data.buf.in = ddrstatus; + } if (!status) op.ninstrs--; - return nand_exec_op(chip, &op); + ret = nand_exec_op(chip, &op); + if (!ret && status && nand_interface_is_nvddr(conf)) + *status = ddrstatus[0]; + + return ret; } chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1); @@ -1711,15 +1830,16 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) u8 status; if (nand_has_exec_op(chip)) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); u8 addrs[3] = { page, page >> 8, page >> 16 }; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_ERASE1, 0), NAND_OP_ADDR(2, addrs, 0), NAND_OP_CMD(NAND_CMD_ERASE2, - PSEC_TO_MSEC(sdr->tWB_max)), - NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0), + NAND_COMMON_TIMING_MS(conf, tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max), + 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); @@ -1770,14 +1890,17 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature, int i, ret; if (nand_has_exec_op(chip)) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0), - NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)), + NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf, + tADL_min)), NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data, - PSEC_TO_NSEC(sdr->tWB_max)), - NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0), + NAND_COMMON_TIMING_NS(conf, + tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max), + 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); @@ -1813,23 +1936,37 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature, static int nand_get_features_op(struct nand_chip *chip, u8 feature, void *data) { - u8 *params = data; + u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2]; int i; if (nand_has_exec_op(chip)) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0), - NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)), - 
NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), - PSEC_TO_NSEC(sdr->tRR_min)), + NAND_OP_ADDR(1, &feature, + NAND_COMMON_TIMING_NS(conf, tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max), + NAND_COMMON_TIMING_NS(conf, tRR_min)), NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN, data, 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); + int ret; - return nand_exec_op(chip, &op); + /* GET_FEATURE data bytes are received twice in NV-DDR mode */ + if (nand_interface_is_nvddr(conf)) { + instrs[3].ctx.data.len *= 2; + instrs[3].ctx.data.buf.in = ddrbuf; + } + + ret = nand_exec_op(chip, &op); + if (nand_interface_is_nvddr(conf)) { + for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++) + params[i] = ddrbuf[i * 2]; + } + + return ret; } chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1); @@ -1874,11 +2011,13 @@ static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms, int nand_reset_op(struct nand_chip *chip) { if (nand_has_exec_op(chip)) { - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(nand_get_interface_config(chip)); + const struct nand_interface_config *conf = + nand_get_interface_config(chip); struct nand_op_instr instrs[] = { - NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)), - NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0), + NAND_OP_CMD(NAND_CMD_RESET, + NAND_COMMON_TIMING_NS(conf, tWB_max)), + NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max), + 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); @@ -1913,17 +2052,50 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, return -EINVAL; if (nand_has_exec_op(chip)) { + const struct nand_interface_config *conf = + nand_get_interface_config(chip); struct nand_op_instr instrs[] = { NAND_OP_DATA_IN(len, buf, 0), }; struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs); + u8 *ddrbuf = NULL; + int ret, i; instrs[0].ctx.data.force_8bit = force_8bit; - if (check_only) - return nand_check_op(chip, &op); + /* + * Parameter payloads (ID, status, features, etc) do not go + * through the same pipeline as regular data, hence the + * force_8bit flag must be set and this also indicates that in + * case NV-DDR timings are being used the data will be received + * twice. 
+ */ + if (force_8bit && nand_interface_is_nvddr(conf)) { + ddrbuf = kzalloc(len * 2, GFP_KERNEL); + if (!ddrbuf) + return -ENOMEM; - return nand_exec_op(chip, &op); + instrs[0].ctx.data.len *= 2; + instrs[0].ctx.data.buf.in = ddrbuf; + } + + if (check_only) { + ret = nand_check_op(chip, &op); + kfree(ddrbuf); + return ret; + } + + ret = nand_exec_op(chip, &op); + if (!ret && force_8bit && nand_interface_is_nvddr(conf)) { + u8 *dst = buf; + + for (i = 0; i < len; i++) + dst[i] = ddrbuf[i * 2]; + } + + kfree(ddrbuf); + + return ret; } if (check_only) @@ -3136,13 +3308,13 @@ static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode) static void nand_wait_readrdy(struct nand_chip *chip) { - const struct nand_sdr_timings *sdr; + const struct nand_interface_config *conf; if (!(chip->options & NAND_NEED_READRDY)) return; - sdr = nand_get_sdr_timings(nand_get_interface_config(chip)); - WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0)); + conf = nand_get_interface_config(chip); + WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0)); } /** @@ -5078,6 +5250,44 @@ static int of_get_nand_secure_regions(struct nand_chip *chip) return 0; } +/** + * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller + * @dev: Device that will be parsed. Also used for managed allocations. + * @cs_array: Array of GPIO desc pointers allocated on success + * @ncs_array: Number of entries in @cs_array updated on success. + * @return 0 on success, an error otherwise. + */ +int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array, + unsigned int *ncs_array) +{ + struct device_node *np = dev->of_node; + struct gpio_desc **descs; + int ndescs, i; + + ndescs = of_gpio_named_count(np, "cs-gpios"); + if (ndescs < 0) { + dev_dbg(dev, "No valid cs-gpios property\n"); + return 0; + } + + descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL); + if (!descs) + return -ENOMEM; + + for (i = 0; i < ndescs; i++) { + descs[i] = gpiod_get_index_optional(dev, "cs", i, + GPIOD_OUT_HIGH); + if (IS_ERR(descs[i])) + return PTR_ERR(descs[i]); + } + + *ncs_array = ndescs; + *cs_array = descs; + + return 0; +} +EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs); + static int rawnand_dt_init(struct nand_chip *chip) { struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip)); diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index eccc18b266d5f5..743792edf98d63 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -369,7 +369,7 @@ static void nand_ccs_delay(struct nand_chip *chip) * Wait tCCS_min if it is correctly defined, otherwise wait 500ns * (which should be safe for all NANDs). 
*/ - if (nand_controller_can_setup_interface(chip)) + if (!IS_ERR(sdr) && nand_controller_can_setup_interface(chip)) ndelay(sdr->tCCS_min / 1000); else ndelay(500); diff --git a/drivers/mtd/nand/raw/nand_onfi.c b/drivers/mtd/nand/raw/nand_onfi.c index 45649e03797d86..7586befce7f920 100644 --- a/drivers/mtd/nand/raw/nand_onfi.c +++ b/drivers/mtd/nand/raw/nand_onfi.c @@ -315,7 +315,10 @@ int nand_onfi_detect(struct nand_chip *chip) onfi->tBERS = le16_to_cpu(p->t_bers); onfi->tR = le16_to_cpu(p->t_r); onfi->tCCS = le16_to_cpu(p->t_ccs); - onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode); + onfi->fast_tCAD = le16_to_cpu(p->nvddr_nvddr2_features) & BIT(0); + onfi->sdr_timing_modes = le16_to_cpu(p->sdr_timing_modes); + if (le16_to_cpu(p->features) & ONFI_FEATURE_NV_DDR) + onfi->nvddr_timing_modes = le16_to_cpu(p->nvddr_timing_modes); onfi->vendor_revision = le16_to_cpu(p->vendor_revision); memcpy(onfi->vendor, p->vendor, sizeof(p->vendor)); chip->parameters.onfi = onfi; diff --git a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index 94d832646487d5..7b41afc372d2b8 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -292,6 +292,261 @@ static const struct nand_interface_config onfi_sdr_timings[] = { }, }; +static const struct nand_interface_config onfi_nvddr_timings[] = { + /* Mode 0 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 0, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 10000, + .tCALH_min = 10000, + .tCALS_min = 10000, + .tCAS_min = 10000, + .tCEH_min = 20000, + .tCH_min = 10000, + .tCK_min = 50000, + .tCS_min = 35000, + .tDH_min = 5000, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + .tDQSQ_max = 5000, + .tDS_min = 5000, + .tDSC_min = 50000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 6000, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, + /* Mode 1 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 1, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 5000, + .tCALH_min = 5000, + .tCALS_min = 5000, + .tCAS_min = 5000, + .tCEH_min = 20000, + .tCH_min = 5000, + .tCK_min = 30000, + .tCS_min = 25000, + .tDH_min = 2500, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + .tDQSQ_max = 2500, + .tDS_min = 3000, + .tDSC_min = 30000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 3000, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, + /* Mode 2 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 2, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 4000, + .tCALH_min = 4000, + .tCALS_min = 4000, + .tCAS_min = 4000, + 
.tCEH_min = 20000, + .tCH_min = 4000, + .tCK_min = 20000, + .tCS_min = 15000, + .tDH_min = 1700, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + .tDQSQ_max = 1700, + .tDS_min = 2000, + .tDSC_min = 20000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 2000, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, + /* Mode 3 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 3, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 3000, + .tCALH_min = 3000, + .tCALS_min = 3000, + .tCAS_min = 3000, + .tCEH_min = 20000, + .tCH_min = 3000, + .tCK_min = 15000, + .tCS_min = 15000, + .tDH_min = 1300, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + .tDQSQ_max = 1300, + .tDS_min = 1500, + .tDSC_min = 15000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 1500, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, + /* Mode 4 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 4, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 2500, + .tCALH_min = 2500, + .tCALS_min = 2500, + .tCAS_min = 2500, + .tCEH_min = 20000, + .tCH_min = 2500, + .tCK_min = 12000, + .tCS_min = 15000, + .tDH_min = 1100, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + .tDQSQ_max = 1000, + .tDS_min = 1100, + .tDSC_min = 12000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 1200, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, + /* Mode 5 */ + { + .type = NAND_NVDDR_IFACE, + .timings.mode = 5, + .timings.nvddr = { + .tCCS_min = 500000, + .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tAC_min = 3000, + .tAC_max = 25000, + .tADL_min = 400000, + .tCAD_min = 45000, + .tCAH_min = 2000, + .tCALH_min = 2000, + .tCALS_min = 2000, + .tCAS_min = 2000, + .tCEH_min = 20000, + .tCH_min = 2000, + .tCK_min = 10000, + .tCS_min = 15000, + .tDH_min = 900, + .tDQSCK_min = 3000, + .tDQSCK_max = 25000, + .tDQSD_min = 0, + .tDQSD_max = 18000, + .tDQSHZ_max = 20000, + .tDQSQ_max = 850, + .tDS_min = 900, + .tDSC_min = 10000, + .tFEAT_max = 1000000, + .tITC_max = 1000000, + .tQHS_max = 1000, + .tRHW_min = 100000, + .tRR_min = 20000, + .tRST_max = 500000000, + .tWB_max = 100000, + .tWHR_min = 80000, + .tWRCK_min = 20000, + .tWW_min = 100000, + }, + }, +}; + /* All NAND chips share the same reset data interface: SDR mode 0 */ const struct nand_interface_config *nand_get_reset_interface_config(void) { @@ -346,23 +601,60 @@ onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings) } /** - * onfi_fill_interface_config - Initialize an interface config from a given - * ONFI mode + * onfi_find_closest_nvddr_mode - Derive 
the closest ONFI NVDDR timing mode + * given a set of timings + * @spec_timings: the timings to challenge + */ +unsigned int +onfi_find_closest_nvddr_mode(const struct nand_nvddr_timings *spec_timings) +{ + const struct nand_nvddr_timings *onfi_timings; + int mode; + + for (mode = ARRAY_SIZE(onfi_nvddr_timings) - 1; mode > 0; mode--) { + onfi_timings = &onfi_nvddr_timings[mode].timings.nvddr; + + if (spec_timings->tCCS_min <= onfi_timings->tCCS_min && + spec_timings->tAC_min <= onfi_timings->tAC_min && + spec_timings->tADL_min <= onfi_timings->tADL_min && + spec_timings->tCAD_min <= onfi_timings->tCAD_min && + spec_timings->tCAH_min <= onfi_timings->tCAH_min && + spec_timings->tCALH_min <= onfi_timings->tCALH_min && + spec_timings->tCALS_min <= onfi_timings->tCALS_min && + spec_timings->tCAS_min <= onfi_timings->tCAS_min && + spec_timings->tCEH_min <= onfi_timings->tCEH_min && + spec_timings->tCH_min <= onfi_timings->tCH_min && + spec_timings->tCK_min <= onfi_timings->tCK_min && + spec_timings->tCS_min <= onfi_timings->tCS_min && + spec_timings->tDH_min <= onfi_timings->tDH_min && + spec_timings->tDQSCK_min <= onfi_timings->tDQSCK_min && + spec_timings->tDQSD_min <= onfi_timings->tDQSD_min && + spec_timings->tDS_min <= onfi_timings->tDS_min && + spec_timings->tDSC_min <= onfi_timings->tDSC_min && + spec_timings->tRHW_min <= onfi_timings->tRHW_min && + spec_timings->tRR_min <= onfi_timings->tRR_min && + spec_timings->tWHR_min <= onfi_timings->tWHR_min && + spec_timings->tWRCK_min <= onfi_timings->tWRCK_min && + spec_timings->tWW_min <= onfi_timings->tWW_min) + return mode; + } + + return 0; +} + +/* + * onfi_fill_sdr_interface_config - Initialize a SDR interface config from a + * given ONFI mode * @chip: The NAND chip * @iface: The interface configuration to fill - * @type: The interface type * @timing_mode: The ONFI timing mode */ -void onfi_fill_interface_config(struct nand_chip *chip, - struct nand_interface_config *iface, - enum nand_interface_type type, - unsigned int timing_mode) +static void onfi_fill_sdr_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface, + unsigned int timing_mode) { struct onfi_params *onfi = chip->parameters.onfi; - if (WARN_ON(type != NAND_SDR_IFACE)) - return; - if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_sdr_timings))) return; @@ -385,3 +677,61 @@ void onfi_fill_interface_config(struct nand_chip *chip, timings->tCCS_min = 1000UL * onfi->tCCS; } } + +/** + * onfi_fill_nvddr_interface_config - Initialize a NVDDR interface config from a + * given ONFI mode + * @chip: The NAND chip + * @iface: The interface configuration to fill + * @timing_mode: The ONFI timing mode + */ +static void onfi_fill_nvddr_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface, + unsigned int timing_mode) +{ + struct onfi_params *onfi = chip->parameters.onfi; + + if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_nvddr_timings))) + return; + + *iface = onfi_nvddr_timings[timing_mode]; + + /* + * Initialize timings that cannot be deduced from timing mode: + * tPROG, tBERS, tR, tCCS and tCAD. + * These information are part of the ONFI parameter page. 
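/*
 * Editor's note -- illustrative sketch, not part of the patch.  The
 * onfi_find_closest_nvddr_mode() helper above walks the ONFI timing
 * table from the fastest mode downwards and returns the first mode
 * whose tabulated minimum timings are all at least as large as the
 * chip's own minimums, i.e. the fastest mode the chip can keep up with;
 * mode 0 is the safe fallback.  Reduced version of that search with
 * only two fields and made-up names:
 */
#include <stddef.h>

struct demo_timings {
	unsigned long tCK_min;	/* picoseconds */
	unsigned long tDS_min;	/* picoseconds */
};

static unsigned int demo_find_closest_mode(const struct demo_timings *spec,
					   const struct demo_timings *table,
					   size_t nmodes)
{
	size_t mode;

	for (mode = nmodes - 1; mode > 0; mode--) {
		if (spec->tCK_min <= table[mode].tCK_min &&
		    spec->tDS_min <= table[mode].tDS_min)
			return mode;
	}

	return 0;
}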
+ */ + if (onfi) { + struct nand_nvddr_timings *timings = &iface->timings.nvddr; + + /* microseconds -> picoseconds */ + timings->tPROG_max = 1000000ULL * onfi->tPROG; + timings->tBERS_max = 1000000ULL * onfi->tBERS; + timings->tR_max = 1000000ULL * onfi->tR; + + /* nanoseconds -> picoseconds */ + timings->tCCS_min = 1000UL * onfi->tCCS; + + if (onfi->fast_tCAD) + timings->tCAD_min = 25000; + } +} + +/** + * onfi_fill_interface_config - Initialize an interface config from a given + * ONFI mode + * @chip: The NAND chip + * @iface: The interface configuration to fill + * @type: The interface type + * @timing_mode: The ONFI timing mode + */ +void onfi_fill_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface, + enum nand_interface_type type, + unsigned int timing_mode) +{ + if (type == NAND_SDR_IFACE) + return onfi_fill_sdr_interface_config(chip, iface, timing_mode); + else + return onfi_fill_nvddr_interface_config(chip, iface, timing_mode); +} diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index c75e7a0b101fe7..b1839eef5b6580 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -131,7 +131,7 @@ #define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */ #define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */ -#define BADBLOCK_MARKER_LENGTH 2 +#define BBM_LEN 2 static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55, 0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78, @@ -171,6 +171,10 @@ struct omap_nand_info { struct device *elm_dev; /* NAND ready gpio */ struct gpio_desc *ready_gpiod; + unsigned int neccpg; + unsigned int nsteps_per_eccpg; + unsigned int eccpg_size; + unsigned int eccpg_bytes; }; static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd) @@ -1355,7 +1359,7 @@ static int omap_elm_correct_data(struct nand_chip *chip, u_char *data, { struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip)); struct nand_ecc_ctrl *ecc = &info->nand.ecc; - int eccsteps = info->nand.ecc.steps; + int eccsteps = info->nsteps_per_eccpg; int i , j, stat = 0; int eccflag, actual_eccbytes; struct elm_errorvec err_vec[ERROR_VECTOR_MAX]; @@ -1525,24 +1529,37 @@ static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); - int ret; + struct omap_nand_info *info = mtd_to_omap(mtd); uint8_t *ecc_calc = chip->ecc.calc_buf; + unsigned int eccpg; + int ret; - nand_prog_page_begin_op(chip, page, 0, NULL, 0); + ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); + if (ret) + return ret; - /* Enable GPMC ecc engine */ - chip->ecc.hwctl(chip, NAND_ECC_WRITE); + for (eccpg = 0; eccpg < info->neccpg; eccpg++) { + /* Enable GPMC ecc engine */ + chip->ecc.hwctl(chip, NAND_ECC_WRITE); - /* Write data */ - chip->legacy.write_buf(chip, buf, mtd->writesize); + /* Write data */ + chip->legacy.write_buf(chip, buf + (eccpg * info->eccpg_size), + info->eccpg_size); - /* Update ecc vector from GPMC result registers */ - omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]); + /* Update ecc vector from GPMC result registers */ + ret = omap_calculate_ecc_bch_multi(mtd, + buf + (eccpg * info->eccpg_size), + ecc_calc); + if (ret) + return ret; - ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, - chip->ecc.total); - if (ret) - return ret; + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, + chip->oob_poi, + eccpg * info->eccpg_bytes, + info->eccpg_bytes); + if (ret) + return ret; + } /* Write ecc vector to OOB area */ 
chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize); @@ -1566,12 +1583,13 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); + struct omap_nand_info *info = mtd_to_omap(mtd); u8 *ecc_calc = chip->ecc.calc_buf; int ecc_size = chip->ecc.size; int ecc_bytes = chip->ecc.bytes; - int ecc_steps = chip->ecc.steps; u32 start_step = offset / ecc_size; u32 end_step = (offset + data_len - 1) / ecc_size; + unsigned int eccpg; int step, ret = 0; /* @@ -1580,36 +1598,48 @@ static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset, * ECC is calculated for all subpages but we choose * only what we want. */ - nand_prog_page_begin_op(chip, page, 0, NULL, 0); - - /* Enable GPMC ECC engine */ - chip->ecc.hwctl(chip, NAND_ECC_WRITE); - - /* Write data */ - chip->legacy.write_buf(chip, buf, mtd->writesize); + ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0); + if (ret) + return ret; - for (step = 0; step < ecc_steps; step++) { - /* mask ECC of un-touched subpages by padding 0xFF */ - if (step < start_step || step > end_step) - memset(ecc_calc, 0xff, ecc_bytes); - else - ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step); + for (eccpg = 0; eccpg < info->neccpg; eccpg++) { + /* Enable GPMC ECC engine */ + chip->ecc.hwctl(chip, NAND_ECC_WRITE); + + /* Write data */ + chip->legacy.write_buf(chip, buf + (eccpg * info->eccpg_size), + info->eccpg_size); + + for (step = 0; step < info->nsteps_per_eccpg; step++) { + unsigned int base_step = eccpg * info->nsteps_per_eccpg; + const u8 *bufoffs = buf + (eccpg * info->eccpg_size); + + /* Mask ECC of un-touched subpages with 0xFFs */ + if ((step + base_step) < start_step || + (step + base_step) > end_step) + memset(ecc_calc + (step * ecc_bytes), 0xff, + ecc_bytes); + else + ret = _omap_calculate_ecc_bch(mtd, + bufoffs + (step * ecc_size), + ecc_calc + (step * ecc_bytes), + step); + + if (ret) + return ret; + } + /* + * Copy the calculated ECC for the whole page including the + * masked values (0xFF) corresponding to unwritten subpages. 
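/*
 * Editor's note -- illustrative sketch, not part of the patch.  The ELM
 * block can only post-process ERROR_VECTOR_MAX (8) error syndromes per
 * pass, so the read/write/subpage paths above now repeat the old
 * single-pass sequence once per "ECC page" of at most that many ECC
 * steps.  The split itself is derived later, in omap_nand_attach_chip();
 * the helper below restates that computation with made-up names:
 */
#define DEMO_ERROR_VECTOR_MAX	8

struct demo_eccpg_layout {
	unsigned int neccpg;		/* ECC pages per NAND page */
	unsigned int nsteps_per_eccpg;	/* ECC steps per ECC page */
	unsigned int eccpg_size;	/* data bytes per ECC page */
	unsigned int eccpg_bytes;	/* ECC bytes per ECC page */
};

static void demo_split_eccpg(struct demo_eccpg_layout *l, unsigned int steps,
			     unsigned int ecc_size, unsigned int ecc_bytes)
{
	l->neccpg = steps / DEMO_ERROR_VECTOR_MAX;
	if (l->neccpg) {
		l->nsteps_per_eccpg = DEMO_ERROR_VECTOR_MAX;
	} else {
		/* Small pages: a single ECC page covers every step */
		l->neccpg = 1;
		l->nsteps_per_eccpg = steps;
	}
	l->eccpg_size = l->nsteps_per_eccpg * ecc_size;
	l->eccpg_bytes = l->nsteps_per_eccpg * ecc_bytes;
}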
+ */ + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, + eccpg * info->eccpg_bytes, + info->eccpg_bytes); if (ret) return ret; - - buf += ecc_size; - ecc_calc += ecc_bytes; } - /* copy calculated ECC for whole page to chip->buffer->oob */ - /* this include masked-value(0xFF) for unwritten subpages */ - ecc_calc = chip->ecc.calc_buf; - ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, - chip->ecc.total); - if (ret) - return ret; - /* write OOB buffer to NAND device */ chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize); @@ -1634,40 +1664,60 @@ static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct mtd_info *mtd = nand_to_mtd(chip); + struct omap_nand_info *info = mtd_to_omap(mtd); uint8_t *ecc_calc = chip->ecc.calc_buf; uint8_t *ecc_code = chip->ecc.code_buf; + unsigned int max_bitflips = 0, eccpg; int stat, ret; - unsigned int max_bitflips = 0; - - nand_read_page_op(chip, page, 0, NULL, 0); - /* Enable GPMC ecc engine */ - chip->ecc.hwctl(chip, NAND_ECC_READ); + ret = nand_read_page_op(chip, page, 0, NULL, 0); + if (ret) + return ret; - /* Read data */ - chip->legacy.read_buf(chip, buf, mtd->writesize); + for (eccpg = 0; eccpg < info->neccpg; eccpg++) { + /* Enable GPMC ecc engine */ + chip->ecc.hwctl(chip, NAND_ECC_READ); - /* Read oob bytes */ - nand_change_read_column_op(chip, - mtd->writesize + BADBLOCK_MARKER_LENGTH, - chip->oob_poi + BADBLOCK_MARKER_LENGTH, - chip->ecc.total, false); + /* Read data */ + ret = nand_change_read_column_op(chip, eccpg * info->eccpg_size, + buf + (eccpg * info->eccpg_size), + info->eccpg_size, false); + if (ret) + return ret; - /* Calculate ecc bytes */ - omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc); + /* Read oob bytes */ + ret = nand_change_read_column_op(chip, + mtd->writesize + BBM_LEN + + (eccpg * info->eccpg_bytes), + chip->oob_poi + BBM_LEN + + (eccpg * info->eccpg_bytes), + info->eccpg_bytes, false); + if (ret) + return ret; - ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, - chip->ecc.total); - if (ret) - return ret; + /* Calculate ecc bytes */ + ret = omap_calculate_ecc_bch_multi(mtd, + buf + (eccpg * info->eccpg_size), + ecc_calc); + if (ret) + return ret; - stat = chip->ecc.correct(chip, buf, ecc_code, ecc_calc); + ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, + chip->oob_poi, + eccpg * info->eccpg_bytes, + info->eccpg_bytes); + if (ret) + return ret; - if (stat < 0) { - mtd->ecc_stats.failed++; - } else { - mtd->ecc_stats.corrected += stat; - max_bitflips = max_t(unsigned int, max_bitflips, stat); + stat = chip->ecc.correct(chip, + buf + (eccpg * info->eccpg_size), + ecc_code, ecc_calc); + if (stat < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(unsigned int, max_bitflips, stat); + } } return max_bitflips; @@ -1820,7 +1870,7 @@ static int omap_ooblayout_ecc(struct mtd_info *mtd, int section, { struct omap_nand_info *info = mtd_to_omap(mtd); struct nand_chip *chip = &info->nand; - int off = BADBLOCK_MARKER_LENGTH; + int off = BBM_LEN; if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW && !(chip->options & NAND_BUSWIDTH_16)) @@ -1840,7 +1890,7 @@ static int omap_ooblayout_free(struct mtd_info *mtd, int section, { struct omap_nand_info *info = mtd_to_omap(mtd); struct nand_chip *chip = &info->nand; - int off = BADBLOCK_MARKER_LENGTH; + int off = BBM_LEN; if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW && !(chip->options & NAND_BUSWIDTH_16)) @@ -1870,7 +1920,7 @@ static int 
omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section, struct nand_device *nand = mtd_to_nanddev(mtd); unsigned int nsteps = nanddev_get_ecc_nsteps(nand); unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand); - int off = BADBLOCK_MARKER_LENGTH; + int off = BBM_LEN; if (section >= nsteps) return -ERANGE; @@ -1891,7 +1941,7 @@ static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section, struct nand_device *nand = mtd_to_nanddev(mtd); unsigned int nsteps = nanddev_get_ecc_nsteps(nand); unsigned int ecc_bytes = nanddev_get_ecc_bytes_per_step(nand); - int off = BADBLOCK_MARKER_LENGTH; + int off = BBM_LEN; if (section) return -ERANGE; @@ -1920,7 +1970,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip) struct mtd_info *mtd = nand_to_mtd(chip); struct omap_nand_info *info = mtd_to_omap(mtd); struct device *dev = &info->pdev->dev; - int min_oobbytes = BADBLOCK_MARKER_LENGTH; + int min_oobbytes = BBM_LEN; + int elm_bch_strength = -1; int oobbytes_per_step; dma_cap_mask_t mask; int err; @@ -2074,12 +2125,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip) chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = chip->ecc.bytes; - - err = elm_config(info->elm_dev, BCH4_ECC, - mtd->writesize / chip->ecc.size, - chip->ecc.size, chip->ecc.bytes); - if (err < 0) - return err; + elm_bch_strength = BCH4_ECC; break; case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: @@ -2116,13 +2162,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip) chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = chip->ecc.bytes; - - err = elm_config(info->elm_dev, BCH8_ECC, - mtd->writesize / chip->ecc.size, - chip->ecc.size, chip->ecc.bytes); - if (err < 0) - return err; - + elm_bch_strength = BCH8_ECC; break; case OMAP_ECC_BCH16_CODE_HW: @@ -2138,19 +2178,32 @@ static int omap_nand_attach_chip(struct nand_chip *chip) chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = chip->ecc.bytes; - - err = elm_config(info->elm_dev, BCH16_ECC, - mtd->writesize / chip->ecc.size, - chip->ecc.size, chip->ecc.bytes); - if (err < 0) - return err; - + elm_bch_strength = BCH16_ECC; break; default: dev_err(dev, "Invalid or unsupported ECC scheme\n"); return -EINVAL; } + if (elm_bch_strength >= 0) { + chip->ecc.steps = mtd->writesize / chip->ecc.size; + info->neccpg = chip->ecc.steps / ERROR_VECTOR_MAX; + if (info->neccpg) { + info->nsteps_per_eccpg = ERROR_VECTOR_MAX; + } else { + info->neccpg = 1; + info->nsteps_per_eccpg = chip->ecc.steps; + } + info->eccpg_size = info->nsteps_per_eccpg * chip->ecc.size; + info->eccpg_bytes = info->nsteps_per_eccpg * chip->ecc.bytes; + + err = elm_config(info->elm_dev, elm_bch_strength, + info->nsteps_per_eccpg, chip->ecc.size, + chip->ecc.bytes); + if (err < 0) + return err; + } + /* Check if NAND device's OOB is enough to store ECC signatures */ min_oobbytes += (oobbytes_per_step * (mtd->writesize / chip->ecc.size)); diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 550695a4c1ab07..2b21ce04b3ec6e 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -116,7 +116,7 @@ int elm_config(struct device *dev, enum bch_ecc bch_type, return -EINVAL; } /* ELM support 8 error syndrome process */ - if (ecc_steps > ERROR_VECTOR_MAX) { + if (ecc_steps > ERROR_VECTOR_MAX && ecc_steps % ERROR_VECTOR_MAX) { dev_err(dev, "unsupported config 
ecc-step=%d\n", ecc_steps); return -EINVAL; } diff --git a/drivers/mtd/nand/raw/pl35x-nand-controller.c b/drivers/mtd/nand/raw/pl35x-nand-controller.c new file mode 100644 index 00000000000000..8a91e069ee2e9b --- /dev/null +++ b/drivers/mtd/nand/raw/pl35x-nand-controller.c @@ -0,0 +1,1194 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM PL35X NAND flash controller driver + * + * Copyright (C) 2017 Xilinx, Inc + * Author: + * Miquel Raynal + * Original work (rewritten): + * Punnaiah Choudary Kalluri + * Naga Sureshkumar Relli + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PL35X_NANDC_DRIVER_NAME "pl35x-nand-controller" + +/* SMC controller status register (RO) */ +#define PL35X_SMC_MEMC_STATUS 0x0 +#define PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1 BIT(6) +/* SMC clear config register (WO) */ +#define PL35X_SMC_MEMC_CFG_CLR 0xC +#define PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1 BIT(1) +#define PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 BIT(4) +#define PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 BIT(6) +/* SMC direct command register (WO) */ +#define PL35X_SMC_DIRECT_CMD 0x10 +#define PL35X_SMC_DIRECT_CMD_NAND_CS (0x4 << 23) +#define PL35X_SMC_DIRECT_CMD_UPD_REGS (0x2 << 21) +/* SMC set cycles register (WO) */ +#define PL35X_SMC_CYCLES 0x14 +#define PL35X_SMC_NAND_TRC_CYCLES(x) ((x) << 0) +#define PL35X_SMC_NAND_TWC_CYCLES(x) ((x) << 4) +#define PL35X_SMC_NAND_TREA_CYCLES(x) ((x) << 8) +#define PL35X_SMC_NAND_TWP_CYCLES(x) ((x) << 11) +#define PL35X_SMC_NAND_TCLR_CYCLES(x) ((x) << 14) +#define PL35X_SMC_NAND_TAR_CYCLES(x) ((x) << 17) +#define PL35X_SMC_NAND_TRR_CYCLES(x) ((x) << 20) +/* SMC set opmode register (WO) */ +#define PL35X_SMC_OPMODE 0x18 +#define PL35X_SMC_OPMODE_BW_8 0 +#define PL35X_SMC_OPMODE_BW_16 1 +/* SMC ECC status register (RO) */ +#define PL35X_SMC_ECC_STATUS 0x400 +#define PL35X_SMC_ECC_STATUS_ECC_BUSY BIT(6) +/* SMC ECC configuration register */ +#define PL35X_SMC_ECC_CFG 0x404 +#define PL35X_SMC_ECC_CFG_MODE_MASK 0xC +#define PL35X_SMC_ECC_CFG_MODE_BYPASS 0 +#define PL35X_SMC_ECC_CFG_MODE_APB BIT(2) +#define PL35X_SMC_ECC_CFG_MODE_MEM BIT(3) +#define PL35X_SMC_ECC_CFG_PGSIZE_MASK 0x3 +/* SMC ECC command 1 register */ +#define PL35X_SMC_ECC_CMD1 0x408 +#define PL35X_SMC_ECC_CMD1_WRITE(x) ((x) << 0) +#define PL35X_SMC_ECC_CMD1_READ(x) ((x) << 8) +#define PL35X_SMC_ECC_CMD1_READ_END(x) ((x) << 16) +#define PL35X_SMC_ECC_CMD1_READ_END_VALID(x) ((x) << 24) +/* SMC ECC command 2 register */ +#define PL35X_SMC_ECC_CMD2 0x40C +#define PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(x) ((x) << 0) +#define PL35X_SMC_ECC_CMD2_READ_COL_CHG(x) ((x) << 8) +#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(x) ((x) << 16) +#define PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(x) ((x) << 24) +/* SMC ECC value registers (RO) */ +#define PL35X_SMC_ECC_VALUE(x) (0x418 + (4 * (x))) +#define PL35X_SMC_ECC_VALUE_IS_CORRECTABLE(x) ((x) & BIT(27)) +#define PL35X_SMC_ECC_VALUE_HAS_FAILED(x) ((x) & BIT(28)) +#define PL35X_SMC_ECC_VALUE_IS_VALID(x) ((x) & BIT(30)) + +/* NAND AXI interface */ +#define PL35X_SMC_CMD_PHASE 0 +#define PL35X_SMC_CMD_PHASE_CMD0(x) ((x) << 3) +#define PL35X_SMC_CMD_PHASE_CMD1(x) ((x) << 11) +#define PL35X_SMC_CMD_PHASE_CMD1_VALID BIT(20) +#define PL35X_SMC_CMD_PHASE_ADDR(pos, x) ((x) << (8 * (pos))) +#define PL35X_SMC_CMD_PHASE_NADDRS(x) ((x) << 21) +#define PL35X_SMC_DATA_PHASE BIT(19) +#define PL35X_SMC_DATA_PHASE_ECC_LAST BIT(10) +#define 
PL35X_SMC_DATA_PHASE_CLEAR_CS BIT(21) + +#define PL35X_NAND_MAX_CS 1 +#define PL35X_NAND_LAST_XFER_SZ 4 +#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, period_ns)) + +#define PL35X_NAND_ECC_BITS_MASK 0xFFF +#define PL35X_NAND_ECC_BYTE_OFF_MASK 0x1FF +#define PL35X_NAND_ECC_BIT_OFF_MASK 0x7 + +struct pl35x_nand_timings { + unsigned int t_rc:4; + unsigned int t_wc:4; + unsigned int t_rea:3; + unsigned int t_wp:3; + unsigned int t_clr:3; + unsigned int t_ar:3; + unsigned int t_rr:4; + unsigned int rsvd:8; +}; + +struct pl35x_nand { + struct list_head node; + struct nand_chip chip; + unsigned int cs; + unsigned int addr_cycles; + u32 ecc_cfg; + u32 timings; +}; + +/** + * struct pl35x_nandc - NAND flash controller driver structure + * @dev: Kernel device + * @conf_regs: SMC configuration registers for command phase + * @io_regs: NAND data registers for data phase + * @controller: Core NAND controller structure + * @chip: NAND chip information structure + * @selected_chip: NAND chip currently selected by the controller + * @assigned_cs: List of assigned CS + * @ecc_buf: Temporary buffer to extract ECC bytes + */ +struct pl35x_nandc { + struct device *dev; + void __iomem *conf_regs; + void __iomem *io_regs; + struct nand_controller controller; + struct list_head chips; + struct nand_chip *selected_chip; + unsigned long assigned_cs; + u8 *ecc_buf; +}; + +static inline struct pl35x_nandc *to_pl35x_nandc(struct nand_controller *ctrl) +{ + return container_of(ctrl, struct pl35x_nandc, controller); +} + +static inline struct pl35x_nand *to_pl35x_nand(struct nand_chip *chip) +{ + return container_of(chip, struct pl35x_nand, chip); +} + +static int pl35x_ecc_ooblayout16_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section >= chip->ecc.steps) + return -ERANGE; + + oobregion->offset = (section * chip->ecc.bytes); + oobregion->length = chip->ecc.bytes; + + return 0; +} + +static int pl35x_ecc_ooblayout16_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + + if (section >= chip->ecc.steps) + return -ERANGE; + + oobregion->offset = (section * chip->ecc.bytes) + 8; + oobregion->length = 8; + + return 0; +} + +static const struct mtd_ooblayout_ops pl35x_ecc_ooblayout16_ops = { + .ecc = pl35x_ecc_ooblayout16_ecc, + .free = pl35x_ecc_ooblayout16_free, +}; + +/* Generic flash bbt decriptors */ +static u8 bbt_pattern[] = { 'B', 'b', 't', '0' }; +static u8 mirror_pattern[] = { '1', 't', 'b', 'B' }; + +static struct nand_bbt_descr bbt_main_descr = { + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE + | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, + .offs = 4, + .len = 4, + .veroffs = 20, + .maxblocks = 4, + .pattern = bbt_pattern +}; + +static struct nand_bbt_descr bbt_mirror_descr = { + .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE + | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, + .offs = 4, + .len = 4, + .veroffs = 20, + .maxblocks = 4, + .pattern = mirror_pattern +}; + +static void pl35x_smc_update_regs(struct pl35x_nandc *nfc) +{ + writel(PL35X_SMC_DIRECT_CMD_NAND_CS | + PL35X_SMC_DIRECT_CMD_UPD_REGS, + nfc->conf_regs + PL35X_SMC_DIRECT_CMD); +} + +static int pl35x_smc_set_buswidth(struct pl35x_nandc *nfc, unsigned int bw) +{ + if (bw != PL35X_SMC_OPMODE_BW_8 && bw != PL35X_SMC_OPMODE_BW_16) + return -EINVAL; + + writel(bw, nfc->conf_regs + PL35X_SMC_OPMODE); + pl35x_smc_update_regs(nfc); 
+ + return 0; +} + +static void pl35x_smc_clear_irq(struct pl35x_nandc *nfc) +{ + writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1, + nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR); +} + +static int pl35x_smc_wait_for_irq(struct pl35x_nandc *nfc) +{ + u32 reg; + int ret; + + ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_MEMC_STATUS, reg, + reg & PL35X_SMC_MEMC_STATUS_RAW_INT_STATUS1, + 10, 1000000); + if (ret) + dev_err(nfc->dev, + "Timeout polling on NAND controller interrupt (0x%x)\n", + reg); + + pl35x_smc_clear_irq(nfc); + + return ret; +} + +static int pl35x_smc_wait_for_ecc_done(struct pl35x_nandc *nfc) +{ + u32 reg; + int ret; + + ret = readl_poll_timeout(nfc->conf_regs + PL35X_SMC_ECC_STATUS, reg, + !(reg & PL35X_SMC_ECC_STATUS_ECC_BUSY), + 10, 1000000); + if (ret) + dev_err(nfc->dev, + "Timeout polling on ECC controller interrupt\n"); + + return ret; +} + +static int pl35x_smc_set_ecc_mode(struct pl35x_nandc *nfc, + struct nand_chip *chip, + unsigned int mode) +{ + struct pl35x_nand *plnand; + u32 ecc_cfg; + + ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG); + ecc_cfg &= ~PL35X_SMC_ECC_CFG_MODE_MASK; + ecc_cfg |= mode; + writel(ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG); + + if (chip) { + plnand = to_pl35x_nand(chip); + plnand->ecc_cfg = ecc_cfg; + } + + if (mode != PL35X_SMC_ECC_CFG_MODE_BYPASS) + return pl35x_smc_wait_for_ecc_done(nfc); + + return 0; +} + +static void pl35x_smc_force_byte_access(struct nand_chip *chip, + bool force_8bit) +{ + struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller); + int ret; + + if (!(chip->options & NAND_BUSWIDTH_16)) + return; + + if (force_8bit) + ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8); + else + ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_16); + + if (ret) + dev_err(nfc->dev, "Error in Buswidth\n"); +} + +static void pl35x_nand_select_target(struct nand_chip *chip, + unsigned int die_nr) +{ + struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller); + struct pl35x_nand *plnand = to_pl35x_nand(chip); + + if (chip == nfc->selected_chip) + return; + + /* Setup the timings */ + writel(plnand->timings, nfc->conf_regs + PL35X_SMC_CYCLES); + pl35x_smc_update_regs(nfc); + + /* Configure the ECC engine */ + writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG); + + nfc->selected_chip = chip; +} + +static void pl35x_nand_read_data_op(struct nand_chip *chip, u8 *in, + unsigned int len, bool force_8bit, + unsigned int flags, unsigned int last_flags) +{ + struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller); + unsigned int buf_end = len / 4; + unsigned int in_start = round_down(len, 4); + unsigned int data_phase_addr; + u32 *buf32 = (u32 *)in; + u8 *buf8 = (u8 *)in; + int i; + + if (force_8bit) + pl35x_smc_force_byte_access(chip, true); + + for (i = 0; i < buf_end; i++) { + data_phase_addr = PL35X_SMC_DATA_PHASE + flags; + if (i + 1 == buf_end) + data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags; + + buf32[i] = readl(nfc->io_regs + data_phase_addr); + } + + /* No working extra flags on unaligned data accesses */ + for (i = in_start; i < len; i++) + buf8[i] = readb(nfc->io_regs + PL35X_SMC_DATA_PHASE); + + if (force_8bit) + pl35x_smc_force_byte_access(chip, false); +} + +static void pl35x_nand_write_data_op(struct nand_chip *chip, const u8 *out, + int len, bool force_8bit, + unsigned int flags, + unsigned int last_flags) +{ + struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller); + unsigned int buf_end = len / 4; + unsigned int in_start = round_down(len, 4); + const u32 *buf32 = (const u32 *)out; + const u8 
*buf8 = (const u8 *)out; + unsigned int data_phase_addr; + int i; + + if (force_8bit) + pl35x_smc_force_byte_access(chip, true); + + for (i = 0; i < buf_end; i++) { + data_phase_addr = PL35X_SMC_DATA_PHASE + flags; + if (i + 1 == buf_end) + data_phase_addr = PL35X_SMC_DATA_PHASE + last_flags; + + writel(buf32[i], nfc->io_regs + data_phase_addr); + } + + /* No working extra flags on unaligned data accesses */ + for (i = in_start; i < len; i++) + writeb(buf8[i], nfc->io_regs + PL35X_SMC_DATA_PHASE); + + if (force_8bit) + pl35x_smc_force_byte_access(chip, false); +} + +static int pl35x_nand_correct_data(struct pl35x_nandc *nfc, unsigned char *buf, + unsigned char *read_ecc, + unsigned char *calc_ecc) +{ + unsigned short ecc_odd, ecc_even, read_ecc_lower, read_ecc_upper; + unsigned short calc_ecc_lower, calc_ecc_upper; + unsigned short byte_addr, bit_addr; + + read_ecc_lower = (read_ecc[0] | (read_ecc[1] << 8)) & + PL35X_NAND_ECC_BITS_MASK; + read_ecc_upper = ((read_ecc[1] >> 4) | (read_ecc[2] << 4)) & + PL35X_NAND_ECC_BITS_MASK; + + calc_ecc_lower = (calc_ecc[0] | (calc_ecc[1] << 8)) & + PL35X_NAND_ECC_BITS_MASK; + calc_ecc_upper = ((calc_ecc[1] >> 4) | (calc_ecc[2] << 4)) & + PL35X_NAND_ECC_BITS_MASK; + + ecc_odd = read_ecc_lower ^ calc_ecc_lower; + ecc_even = read_ecc_upper ^ calc_ecc_upper; + + /* No error */ + if (likely(!ecc_odd && !ecc_even)) + return 0; + + /* One error in the main data; to be corrected */ + if (ecc_odd == (~ecc_even & PL35X_NAND_ECC_BITS_MASK)) { + /* Bits [11:3] of error code give the byte offset */ + byte_addr = (ecc_odd >> 3) & PL35X_NAND_ECC_BYTE_OFF_MASK; + /* Bits [2:0] of error code give the bit offset */ + bit_addr = ecc_odd & PL35X_NAND_ECC_BIT_OFF_MASK; + /* Toggle the faulty bit */ + buf[byte_addr] ^= (BIT(bit_addr)); + + return 1; + } + + /* One error in the ECC data; no action needed */ + if (hweight32(ecc_odd | ecc_even) == 1) + return 1; + + return -EBADMSG; +} + +static void pl35x_nand_ecc_reg_to_array(struct nand_chip *chip, u32 ecc_reg, + u8 *ecc_array) +{ + u32 ecc_value = ~ecc_reg; + unsigned int ecc_byte; + + for (ecc_byte = 0; ecc_byte < chip->ecc.bytes; ecc_byte++) + ecc_array[ecc_byte] = ecc_value >> (8 * ecc_byte); +} + +static int pl35x_nand_read_eccbytes(struct pl35x_nandc *nfc, + struct nand_chip *chip, u8 *read_ecc) +{ + u32 ecc_value; + int chunk; + + for (chunk = 0; chunk < chip->ecc.steps; + chunk++, read_ecc += chip->ecc.bytes) { + ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk)); + if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value)) + return -EINVAL; + + pl35x_nand_ecc_reg_to_array(chip, ecc_value, read_ecc); + } + + return 0; +} + +static int pl35x_nand_recover_data_hwecc(struct pl35x_nandc *nfc, + struct nand_chip *chip, u8 *data, + u8 *read_ecc) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + unsigned int max_bitflips = 0, chunk; + u8 calc_ecc[3]; + u32 ecc_value; + int stats; + + for (chunk = 0; chunk < chip->ecc.steps; + chunk++, data += chip->ecc.size, read_ecc += chip->ecc.bytes) { + /* Read ECC value for each chunk */ + ecc_value = readl(nfc->conf_regs + PL35X_SMC_ECC_VALUE(chunk)); + + if (!PL35X_SMC_ECC_VALUE_IS_VALID(ecc_value)) + return -EINVAL; + + if (PL35X_SMC_ECC_VALUE_HAS_FAILED(ecc_value)) { + mtd->ecc_stats.failed++; + continue; + } + + pl35x_nand_ecc_reg_to_array(chip, ecc_value, calc_ecc); + stats = pl35x_nand_correct_data(nfc, data, read_ecc, calc_ecc); + if (stats < 0) { + mtd->ecc_stats.failed++; + } else { + mtd->ecc_stats.corrected += stats; + max_bitflips = max_t(unsigned int, max_bitflips, stats); 
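/*
 * Editor's note -- illustrative sketch, not part of the patch.  The SMC
 * produces a 24-bit Hamming-like code per 512-byte chunk, split into a
 * 12-bit "odd" and a 12-bit "even" half.  pl35x_nand_correct_data()
 * XORs the stored and recomputed halves: all zeroes means no error, an
 * odd half that is the complement of the even half locates a single
 * flipped data bit (byte offset in bits [11:3], bit offset in bits
 * [2:0]), a single set bit overall means the error sits in the ECC
 * bytes themselves, and anything else is uncorrectable.  Minimal
 * restatement of that classification with made-up names:
 */
#include <stdint.h>

/* Returns the number of reported bitflips, or -1 if uncorrectable. */
static int demo_hamming_fix(uint8_t *data, uint16_t ecc_odd, uint16_t ecc_even)
{
	if (!ecc_odd && !ecc_even)
		return 0;

	if (ecc_odd == (~ecc_even & 0xFFF)) {
		/* Single data-bit error: locate it and toggle it */
		data[(ecc_odd >> 3) & 0x1FF] ^= 1 << (ecc_odd & 0x7);
		return 1;
	}

	if (__builtin_popcount((unsigned int)(ecc_odd | ecc_even)) == 1)
		return 1;	/* Error confined to the ECC bytes */

	return -1;
}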
+ } + } + + return max_bitflips; +} + +static int pl35x_nand_write_page_hwecc(struct nand_chip *chip, + const u8 *buf, int oob_required, + int page) +{ + struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller); + struct pl35x_nand *plnand = to_pl35x_nand(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + unsigned int first_row = (mtd->writesize <= 512) ? 1 : 2; + unsigned int nrows = plnand->addr_cycles; + u32 addr1 = 0, addr2 = 0, row; + u32 cmd_addr; + int i, ret; + + ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB); + if (ret) + return ret; + + cmd_addr = PL35X_SMC_CMD_PHASE | + PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) | + PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_SEQIN); + + for (i = 0, row = first_row; row < nrows; i++, row++) { + u8 addr = page >> ((i * 8) & 0xFF); + + if (row < 4) + addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr); + else + addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr); + } + + /* Send the command and address cycles */ + writel(addr1, nfc->io_regs + cmd_addr); + if (plnand->addr_cycles > 4) + writel(addr2, nfc->io_regs + cmd_addr); + + /* Write the data with the engine enabled */ + pl35x_nand_write_data_op(chip, buf, mtd->writesize, false, + 0, PL35X_SMC_DATA_PHASE_ECC_LAST); + ret = pl35x_smc_wait_for_ecc_done(nfc); + if (ret) + goto disable_ecc_engine; + + /* Copy the HW calculated ECC bytes in the OOB buffer */ + ret = pl35x_nand_read_eccbytes(nfc, chip, nfc->ecc_buf); + if (ret) + goto disable_ecc_engine; + + if (!oob_required) + memset(chip->oob_poi, 0xFF, mtd->oobsize); + + ret = mtd_ooblayout_set_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi, + 0, chip->ecc.total); + if (ret) + goto disable_ecc_engine; + + /* Write the spare area with ECC bytes */ + pl35x_nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false, 0, + PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_PAGEPROG) | + PL35X_SMC_CMD_PHASE_CMD1_VALID | + PL35X_SMC_DATA_PHASE_CLEAR_CS); + ret = pl35x_smc_wait_for_irq(nfc); + if (ret) + goto disable_ecc_engine; + +disable_ecc_engine: + pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS); + + return ret; +} + +/* + * This functions reads data and checks the data integrity by comparing hardware + * generated ECC values and read ECC values from spare area. + * + * There is a limitation with SMC controller: ECC_LAST must be set on the + * last data access to tell the ECC engine not to expect any further data. + * In practice, this implies to shrink the last data transfert by eg. 4 bytes, + * and doing a last 4-byte transfer with the additional bit set. The last block + * should be aligned with the end of an ECC block. Because of this limitation, + * it is not possible to use the core routines. + */ +static int pl35x_nand_read_page_hwecc(struct nand_chip *chip, + u8 *buf, int oob_required, int page) +{ + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); + struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller); + struct pl35x_nand *plnand = to_pl35x_nand(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + unsigned int first_row = (mtd->writesize <= 512) ? 
1 : 2; + unsigned int nrows = plnand->addr_cycles; + unsigned int addr1 = 0, addr2 = 0, row; + u32 cmd_addr; + int i, ret; + + ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB); + if (ret) + return ret; + + cmd_addr = PL35X_SMC_CMD_PHASE | + PL35X_SMC_CMD_PHASE_NADDRS(plnand->addr_cycles) | + PL35X_SMC_CMD_PHASE_CMD0(NAND_CMD_READ0) | + PL35X_SMC_CMD_PHASE_CMD1(NAND_CMD_READSTART) | + PL35X_SMC_CMD_PHASE_CMD1_VALID; + + for (i = 0, row = first_row; row < nrows; i++, row++) { + u8 addr = page >> ((i * 8) & 0xFF); + + if (row < 4) + addr1 |= PL35X_SMC_CMD_PHASE_ADDR(row, addr); + else + addr2 |= PL35X_SMC_CMD_PHASE_ADDR(row - 4, addr); + } + + /* Send the command and address cycles */ + writel(addr1, nfc->io_regs + cmd_addr); + if (plnand->addr_cycles > 4) + writel(addr2, nfc->io_regs + cmd_addr); + + /* Wait the data to be available in the NAND cache */ + ndelay(PSEC_TO_NSEC(sdr->tRR_min)); + ret = pl35x_smc_wait_for_irq(nfc); + if (ret) + goto disable_ecc_engine; + + /* Retrieve the raw data with the engine enabled */ + pl35x_nand_read_data_op(chip, buf, mtd->writesize, false, + 0, PL35X_SMC_DATA_PHASE_ECC_LAST); + ret = pl35x_smc_wait_for_ecc_done(nfc); + if (ret) + goto disable_ecc_engine; + + /* Retrieve the stored ECC bytes */ + pl35x_nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, + 0, PL35X_SMC_DATA_PHASE_CLEAR_CS); + ret = mtd_ooblayout_get_eccbytes(mtd, nfc->ecc_buf, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + goto disable_ecc_engine; + + pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS); + + /* Correct the data and report failures */ + return pl35x_nand_recover_data_hwecc(nfc, chip, buf, nfc->ecc_buf); + +disable_ecc_engine: + pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS); + + return ret; +} + +static int pl35x_nand_exec_op(struct nand_chip *chip, + const struct nand_subop *subop) +{ + struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller); + const struct nand_op_instr *instr, *data_instr = NULL; + unsigned int rdy_tim_ms = 0, naddrs = 0, cmds = 0, last_flags = 0; + u32 addr1 = 0, addr2 = 0, cmd0 = 0, cmd1 = 0, cmd_addr = 0; + unsigned int op_id, len, offset, rdy_del_ns; + int last_instr_type = -1; + bool cmd1_valid = false; + const u8 *addrs; + int i, ret; + + for (op_id = 0; op_id < subop->ninstrs; op_id++) { + instr = &subop->instrs[op_id]; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + if (!cmds) { + cmd0 = PL35X_SMC_CMD_PHASE_CMD0(instr->ctx.cmd.opcode); + } else { + cmd1 = PL35X_SMC_CMD_PHASE_CMD1(instr->ctx.cmd.opcode); + if (last_instr_type != NAND_OP_DATA_OUT_INSTR) + cmd1_valid = true; + } + cmds++; + break; + + case NAND_OP_ADDR_INSTR: + offset = nand_subop_get_addr_start_off(subop, op_id); + naddrs = nand_subop_get_num_addr_cyc(subop, op_id); + addrs = &instr->ctx.addr.addrs[offset]; + cmd_addr |= PL35X_SMC_CMD_PHASE_NADDRS(naddrs); + + for (i = offset; i < naddrs; i++) { + if (i < 4) + addr1 |= PL35X_SMC_CMD_PHASE_ADDR(i, addrs[i]); + else + addr2 |= PL35X_SMC_CMD_PHASE_ADDR(i - 4, addrs[i]); + } + break; + + case NAND_OP_DATA_IN_INSTR: + case NAND_OP_DATA_OUT_INSTR: + data_instr = instr; + len = nand_subop_get_data_len(subop, op_id); + break; + + case NAND_OP_WAITRDY_INSTR: + rdy_tim_ms = instr->ctx.waitrdy.timeout_ms; + rdy_del_ns = instr->delay_ns; + break; + } + + last_instr_type = instr->type; + } + + /* Command phase */ + cmd_addr |= PL35X_SMC_CMD_PHASE | cmd0 | cmd1 | + (cmd1_valid ? 
PL35X_SMC_CMD_PHASE_CMD1_VALID : 0); + writel(addr1, nfc->io_regs + cmd_addr); + if (naddrs > 4) + writel(addr2, nfc->io_regs + cmd_addr); + + /* Data phase */ + if (data_instr && data_instr->type == NAND_OP_DATA_OUT_INSTR) { + last_flags = PL35X_SMC_DATA_PHASE_CLEAR_CS; + if (cmds == 2) + last_flags |= cmd1 | PL35X_SMC_CMD_PHASE_CMD1_VALID; + + pl35x_nand_write_data_op(chip, data_instr->ctx.data.buf.out, + len, data_instr->ctx.data.force_8bit, + 0, last_flags); + } + + if (rdy_tim_ms) { + ndelay(rdy_del_ns); + ret = pl35x_smc_wait_for_irq(nfc); + if (ret) + return ret; + } + + if (data_instr && data_instr->type == NAND_OP_DATA_IN_INSTR) + pl35x_nand_read_data_op(chip, data_instr->ctx.data.buf.in, + len, data_instr->ctx.data.force_8bit, + 0, PL35X_SMC_DATA_PHASE_CLEAR_CS); + + return 0; +} + +static const struct nand_op_parser pl35x_nandc_op_parser = NAND_OP_PARSER( + NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op, + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_ADDR_ELEM(true, 7), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true), + NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 2112)), + NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112), + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), + NAND_OP_PARSER_PATTERN(pl35x_nand_exec_op, + NAND_OP_PARSER_PAT_CMD_ELEM(false), + NAND_OP_PARSER_PAT_ADDR_ELEM(false, 7), + NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 2112), + NAND_OP_PARSER_PAT_CMD_ELEM(true), + NAND_OP_PARSER_PAT_WAITRDY_ELEM(true)), + ); + +static int pl35x_nfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + if (!check_only) + pl35x_nand_select_target(chip, op->cs); + + return nand_op_parser_exec_op(chip, &pl35x_nandc_op_parser, + op, check_only); +} + +static int pl35x_nfc_setup_interface(struct nand_chip *chip, int cs, + const struct nand_interface_config *conf) +{ + struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller); + struct pl35x_nand *plnand = to_pl35x_nand(chip); + struct pl35x_nand_timings tmgs = {}; + const struct nand_sdr_timings *sdr; + unsigned int period_ns, val; + struct clk *mclk; + + sdr = nand_get_sdr_timings(conf); + if (IS_ERR(sdr)) + return PTR_ERR(sdr); + + mclk = of_clk_get_by_name(nfc->dev->parent->of_node, "memclk"); + if (IS_ERR(mclk)) { + dev_err(nfc->dev, "Failed to retrieve SMC memclk\n"); + return PTR_ERR(mclk); + } + + /* + * SDR timings are given in pico-seconds while NFC timings must be + * expressed in NAND controller clock cycles. We use the TO_CYCLE() + * macro to convert from one to the other. + */ + period_ns = NSEC_PER_SEC / clk_get_rate(mclk); + + /* + * PL35X SMC needs one extra read cycle in SDR Mode 5. This is not + * written anywhere in the datasheet but is an empirical observation. + */ + val = TO_CYCLES(sdr->tRC_min, period_ns); + if (sdr->tRC_min <= 20000) + val++; + + tmgs.t_rc = val; + if (tmgs.t_rc != val || tmgs.t_rc < 2) + return -EINVAL; + + val = TO_CYCLES(sdr->tWC_min, period_ns); + tmgs.t_wc = val; + if (tmgs.t_wc != val || tmgs.t_wc < 2) + return -EINVAL; + + /* + * For all SDR modes, PL35X SMC needs tREA_max being 1, + * this is also an empirical result. 
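/*
 * Editor's note -- illustrative sketch, not part of the patch.  The core
 * hands over SDR timings in picoseconds while the SMC CYCLES register
 * counts memclk periods, so setup_interface() derives the clock period
 * in nanoseconds and converts every timing with the TO_CYCLES() macro,
 * rounding up so the programmed value is never shorter than the
 * datasheet minimum.  Standalone version of that conversion (names are
 * made up):
 */
static unsigned int demo_ps_to_cycles(unsigned long long timing_ps,
				      unsigned long memclk_hz)
{
	unsigned long period_ns = 1000000000UL / memclk_hz;
	unsigned long long timing_ns = timing_ps / 1000;

	/* Round up: a partially covered period still costs a full cycle */
	return (timing_ns + period_ns - 1) / period_ns;
}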
+ */ + tmgs.t_rea = 1; + + val = TO_CYCLES(sdr->tWP_min, period_ns); + tmgs.t_wp = val; + if (tmgs.t_wp != val || tmgs.t_wp < 1) + return -EINVAL; + + val = TO_CYCLES(sdr->tCLR_min, period_ns); + tmgs.t_clr = val; + if (tmgs.t_clr != val) + return -EINVAL; + + val = TO_CYCLES(sdr->tAR_min, period_ns); + tmgs.t_ar = val; + if (tmgs.t_ar != val) + return -EINVAL; + + val = TO_CYCLES(sdr->tRR_min, period_ns); + tmgs.t_rr = val; + if (tmgs.t_rr != val) + return -EINVAL; + + if (cs == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + plnand->timings = PL35X_SMC_NAND_TRC_CYCLES(tmgs.t_rc) | + PL35X_SMC_NAND_TWC_CYCLES(tmgs.t_wc) | + PL35X_SMC_NAND_TREA_CYCLES(tmgs.t_rea) | + PL35X_SMC_NAND_TWP_CYCLES(tmgs.t_wp) | + PL35X_SMC_NAND_TCLR_CYCLES(tmgs.t_clr) | + PL35X_SMC_NAND_TAR_CYCLES(tmgs.t_ar) | + PL35X_SMC_NAND_TRR_CYCLES(tmgs.t_rr); + + return 0; +} + +static void pl35x_smc_set_ecc_pg_size(struct pl35x_nandc *nfc, + struct nand_chip *chip, + unsigned int pg_sz) +{ + struct pl35x_nand *plnand = to_pl35x_nand(chip); + u32 sz; + + switch (pg_sz) { + case SZ_512: + sz = 1; + break; + case SZ_1K: + sz = 2; + break; + case SZ_2K: + sz = 3; + break; + default: + sz = 0; + break; + } + + plnand->ecc_cfg = readl(nfc->conf_regs + PL35X_SMC_ECC_CFG); + plnand->ecc_cfg &= ~PL35X_SMC_ECC_CFG_PGSIZE_MASK; + plnand->ecc_cfg |= sz; + writel(plnand->ecc_cfg, nfc->conf_regs + PL35X_SMC_ECC_CFG); +} + +static int pl35x_nand_init_hw_ecc_controller(struct pl35x_nandc *nfc, + struct nand_chip *chip) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + int ret = 0; + + if (mtd->writesize < SZ_512 || mtd->writesize > SZ_2K) { + dev_err(nfc->dev, + "The hardware ECC engine is limited to pages up to 2kiB\n"); + return -EOPNOTSUPP; + } + + chip->ecc.strength = 1; + chip->ecc.bytes = 3; + chip->ecc.size = SZ_512; + chip->ecc.steps = mtd->writesize / chip->ecc.size; + chip->ecc.read_page = pl35x_nand_read_page_hwecc; + chip->ecc.write_page = pl35x_nand_write_page_hwecc; + chip->ecc.write_page_raw = nand_monolithic_write_page_raw; + pl35x_smc_set_ecc_pg_size(nfc, chip, mtd->writesize); + + nfc->ecc_buf = devm_kmalloc(nfc->dev, chip->ecc.bytes * chip->ecc.steps, + GFP_KERNEL); + if (!nfc->ecc_buf) + return -ENOMEM; + + switch (mtd->oobsize) { + case 16: + /* Legacy Xilinx layout */ + mtd_set_ooblayout(mtd, &pl35x_ecc_ooblayout16_ops); + chip->bbt_options |= NAND_BBT_NO_OOB_BBM; + break; + case 64: + mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout()); + break; + default: + dev_err(nfc->dev, "Unsupported OOB size\n"); + return -EOPNOTSUPP; + } + + return ret; +} + +static int pl35x_nand_attach_chip(struct nand_chip *chip) +{ + const struct nand_ecc_props *requirements = + nanddev_get_ecc_requirements(&chip->base); + struct pl35x_nandc *nfc = to_pl35x_nandc(chip->controller); + struct pl35x_nand *plnand = to_pl35x_nand(chip); + struct mtd_info *mtd = nand_to_mtd(chip); + int ret; + + if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_NONE && + (!chip->ecc.size || !chip->ecc.strength)) { + if (requirements->step_size && requirements->strength) { + chip->ecc.size = requirements->step_size; + chip->ecc.strength = requirements->strength; + } else { + dev_info(nfc->dev, + "No minimum ECC strength, using 1b/512B\n"); + chip->ecc.size = 512; + chip->ecc.strength = 1; + } + } + + if (mtd->writesize <= SZ_512) + plnand->addr_cycles = 1; + else + plnand->addr_cycles = 2; + + if (chip->options & NAND_ROW_ADDR_3) + plnand->addr_cycles += 3; + else + plnand->addr_cycles += 2; + + switch (chip->ecc.engine_type) { + case 
NAND_ECC_ENGINE_TYPE_ON_DIE: + /* Keep these legacy BBT descriptors for ON_DIE situations */ + chip->bbt_td = &bbt_main_descr; + chip->bbt_md = &bbt_mirror_descr; + fallthrough; + case NAND_ECC_ENGINE_TYPE_NONE: + case NAND_ECC_ENGINE_TYPE_SOFT: + break; + case NAND_ECC_ENGINE_TYPE_ON_HOST: + ret = pl35x_nand_init_hw_ecc_controller(nfc, chip); + if (ret) + return ret; + break; + default: + dev_err(nfc->dev, "Unsupported ECC mode: %d\n", + chip->ecc.engine_type); + return -EINVAL; + } + + return 0; +} + +static const struct nand_controller_ops pl35x_nandc_ops = { + .attach_chip = pl35x_nand_attach_chip, + .exec_op = pl35x_nfc_exec_op, + .setup_interface = pl35x_nfc_setup_interface, +}; + +static int pl35x_nand_reset_state(struct pl35x_nandc *nfc) +{ + int ret; + + /* Disable interrupts and clear their status */ + writel(PL35X_SMC_MEMC_CFG_CLR_INT_CLR_1 | + PL35X_SMC_MEMC_CFG_CLR_ECC_INT_DIS_1 | + PL35X_SMC_MEMC_CFG_CLR_INT_DIS_1, + nfc->conf_regs + PL35X_SMC_MEMC_CFG_CLR); + + /* Set default bus width to 8-bit */ + ret = pl35x_smc_set_buswidth(nfc, PL35X_SMC_OPMODE_BW_8); + if (ret) + return ret; + + /* Ensure the ECC controller is bypassed by default */ + ret = pl35x_smc_set_ecc_mode(nfc, NULL, PL35X_SMC_ECC_CFG_MODE_BYPASS); + if (ret) + return ret; + + /* + * Configure the commands that the ECC block uses to detect the + * operations it should start/end. + */ + writel(PL35X_SMC_ECC_CMD1_WRITE(NAND_CMD_SEQIN) | + PL35X_SMC_ECC_CMD1_READ(NAND_CMD_READ0) | + PL35X_SMC_ECC_CMD1_READ_END(NAND_CMD_READSTART) | + PL35X_SMC_ECC_CMD1_READ_END_VALID(NAND_CMD_READ1), + nfc->conf_regs + PL35X_SMC_ECC_CMD1); + writel(PL35X_SMC_ECC_CMD2_WRITE_COL_CHG(NAND_CMD_RNDIN) | + PL35X_SMC_ECC_CMD2_READ_COL_CHG(NAND_CMD_RNDOUT) | + PL35X_SMC_ECC_CMD2_READ_COL_CHG_END(NAND_CMD_RNDOUTSTART) | + PL35X_SMC_ECC_CMD2_READ_COL_CHG_END_VALID(NAND_CMD_READ1), + nfc->conf_regs + PL35X_SMC_ECC_CMD2); + + return 0; +} + +static int pl35x_nand_chip_init(struct pl35x_nandc *nfc, + struct device_node *np) +{ + struct pl35x_nand *plnand; + struct nand_chip *chip; + struct mtd_info *mtd; + int cs, ret; + + plnand = devm_kzalloc(nfc->dev, sizeof(*plnand), GFP_KERNEL); + if (!plnand) + return -ENOMEM; + + ret = of_property_read_u32(np, "reg", &cs); + if (ret) + return ret; + + if (cs >= PL35X_NAND_MAX_CS) { + dev_err(nfc->dev, "Wrong CS %d\n", cs); + return -EINVAL; + } + + if (test_and_set_bit(cs, &nfc->assigned_cs)) { + dev_err(nfc->dev, "Already assigned CS %d\n", cs); + return -EINVAL; + } + + plnand->cs = cs; + + chip = &plnand->chip; + chip->options = NAND_BUSWIDTH_AUTO | NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE; + chip->bbt_options = NAND_BBT_USE_FLASH; + chip->controller = &nfc->controller; + mtd = nand_to_mtd(chip); + mtd->dev.parent = nfc->dev; + nand_set_flash_node(chip, nfc->dev->of_node); + if (!mtd->name) { + mtd->name = devm_kasprintf(nfc->dev, GFP_KERNEL, + "%s", PL35X_NANDC_DRIVER_NAME); + if (!mtd->name) { + dev_err(nfc->dev, "Failed to allocate mtd->name\n"); + return -ENOMEM; + } + } + + ret = nand_scan(chip, 1); + if (ret) + return ret; + + ret = mtd_device_register(mtd, NULL, 0); + if (ret) { + nand_cleanup(chip); + return ret; + } + + list_add_tail(&plnand->node, &nfc->chips); + + return ret; +} + +static void pl35x_nand_chips_cleanup(struct pl35x_nandc *nfc) +{ + struct pl35x_nand *plnand, *tmp; + struct nand_chip *chip; + int ret; + + list_for_each_entry_safe(plnand, tmp, &nfc->chips, node) { + chip = &plnand->chip; + ret = mtd_device_unregister(nand_to_mtd(chip)); + WARN_ON(ret); + nand_cleanup(chip); + 
list_del(&plnand->node); + } +} + +static int pl35x_nand_chips_init(struct pl35x_nandc *nfc) +{ + struct device_node *np = nfc->dev->of_node, *nand_np; + int nchips = of_get_child_count(np); + int ret; + + if (!nchips || nchips > PL35X_NAND_MAX_CS) { + dev_err(nfc->dev, "Incorrect number of NAND chips (%d)\n", + nchips); + return -EINVAL; + } + + for_each_child_of_node(np, nand_np) { + ret = pl35x_nand_chip_init(nfc, nand_np); + if (ret) { + of_node_put(nand_np); + pl35x_nand_chips_cleanup(nfc); + break; + } + } + + return ret; +} + +static int pl35x_nand_probe(struct platform_device *pdev) +{ + struct device *smc_dev = pdev->dev.parent; + struct amba_device *smc_amba = to_amba_device(smc_dev); + struct pl35x_nandc *nfc; + u32 ret; + + nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL); + if (!nfc) + return -ENOMEM; + + nfc->dev = &pdev->dev; + nand_controller_init(&nfc->controller); + nfc->controller.ops = &pl35x_nandc_ops; + INIT_LIST_HEAD(&nfc->chips); + + nfc->conf_regs = devm_ioremap_resource(&smc_amba->dev, &smc_amba->res); + if (IS_ERR(nfc->conf_regs)) + return PTR_ERR(nfc->conf_regs); + + nfc->io_regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(nfc->io_regs)) + return PTR_ERR(nfc->io_regs); + + ret = pl35x_nand_reset_state(nfc); + if (ret) + return ret; + + ret = pl35x_nand_chips_init(nfc); + if (ret) + return ret; + + platform_set_drvdata(pdev, nfc); + + return 0; +} + +static int pl35x_nand_remove(struct platform_device *pdev) +{ + struct pl35x_nandc *nfc = platform_get_drvdata(pdev); + + pl35x_nand_chips_cleanup(nfc); + + return 0; +} + +static const struct of_device_id pl35x_nand_of_match[] = { + { .compatible = "arm,pl353-nand-r2p1" }, + {}, +}; +MODULE_DEVICE_TABLE(of, pl35x_nand_of_match); + +static struct platform_driver pl35x_nandc_driver = { + .probe = pl35x_nand_probe, + .remove = pl35x_nand_remove, + .driver = { + .name = PL35X_NANDC_DRIVER_NAME, + .of_match_table = pl35x_nand_of_match, + }, +}; +module_platform_driver(pl35x_nandc_driver); + +MODULE_AUTHOR("Xilinx, Inc."); +MODULE_ALIAS("platform:" PL35X_NANDC_DRIVER_NAME); +MODULE_DESCRIPTION("ARM PL35X NAND controller driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index a64fb6ce915dae..ef0badea4f4158 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -734,6 +734,7 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i { struct nand_chip *chip = &host->chip; u32 cmd, cfg0, cfg1, ecc_bch_cfg; + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); if (read) { if (host->use_ecc) @@ -762,7 +763,8 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read, i nandc_set_reg(chip, NAND_DEV0_CFG0, cfg0); nandc_set_reg(chip, NAND_DEV0_CFG1, cfg1); nandc_set_reg(chip, NAND_DEV0_ECC_CFG, ecc_bch_cfg); - nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg); + if (!nandc->props->qpic_v2) + nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg); nandc_set_reg(chip, NAND_FLASH_STATUS, host->clrflashstatus); nandc_set_reg(chip, NAND_READ_STATUS, host->clrreadstatus); nandc_set_reg(chip, NAND_EXEC_CMD, 1); @@ -1133,7 +1135,8 @@ static void config_nand_page_read(struct nand_chip *chip) write_reg_dma(nandc, NAND_ADDR0, 2, 0); write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0); - write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0); + if (!nandc->props->qpic_v2) + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, 0); write_reg_dma(nandc, 
NAND_ERASED_CW_DETECT_CFG, 1, 0); write_reg_dma(nandc, NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL); @@ -1191,8 +1194,9 @@ static void config_nand_page_write(struct nand_chip *chip) write_reg_dma(nandc, NAND_ADDR0, 2, 0); write_reg_dma(nandc, NAND_DEV0_CFG0, 3, 0); - write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, - NAND_BAM_NEXT_SGL); + if (!nandc->props->qpic_v2) + write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1, + NAND_BAM_NEXT_SGL); } /* @@ -1248,7 +1252,8 @@ static int nandc_param(struct qcom_nand_host *host) | 2 << WR_RD_BSY_GAP | 0 << WIDE_FLASH | 1 << DEV0_CFG1_ECC_DISABLE); - nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE); + if (!nandc->props->qpic_v2) + nandc_set_reg(chip, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE); /* configure CMD1 and VLD for ONFI param probing in QPIC v1 */ if (!nandc->props->qpic_v2) { @@ -1850,8 +1855,7 @@ static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf, * ERASED_CW bits are set. */ if (host->bch_enabled) { - erased = (erased_cw & ERASED_CW) == ERASED_CW ? - true : false; + erased = (erased_cw & ERASED_CW) == ERASED_CW; /* * For RS ECC, HW reports the erased CW by placing * special characters at certain offsets in the buffer. @@ -2689,7 +2693,8 @@ static int qcom_nand_attach_chip(struct nand_chip *chip) | ecc_mode << ECC_MODE | host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH; - host->ecc_buf_cfg = 0x203 << NUM_STEPS; + if (!nandc->props->qpic_v2) + host->ecc_buf_cfg = 0x203 << NUM_STEPS; host->clrflashstatus = FS_READY_BSY_N; host->clrreadstatus = 0xc0; @@ -2882,7 +2887,7 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) return 0; } -static const char * const probes[] = { "qcomsmem", NULL }; +static const char * const probes[] = { "cmdlinepart", "ofpart", "qcomsmem", NULL }; static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc, struct qcom_nand_host *host, diff --git a/drivers/mtd/nand/raw/r852.c b/drivers/mtd/nand/raw/r852.c index ebe859ca49cbfa..ed0cf732d20e40 100644 --- a/drivers/mtd/nand/raw/r852.c +++ b/drivers/mtd/nand/raw/r852.c @@ -583,8 +583,8 @@ static void r852_update_card_detect(struct r852_device *dev) r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg); } -static ssize_t r852_media_type_show(struct device *sys_dev, - struct device_attribute *attr, char *buf) +static ssize_t media_type_show(struct device *sys_dev, + struct device_attribute *attr, char *buf) { struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev); struct r852_device *dev = r852_get_dev(mtd); @@ -593,8 +593,7 @@ static ssize_t r852_media_type_show(struct device *sys_dev, strcpy(buf, data); return strlen(data); } - -static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL); +static DEVICE_ATTR_RO(media_type); /* Detect properties of card in slot */ diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index 923a9e236fcf62..ea953e31933ebb 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1972,10 +1972,8 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc, sunxi_nand = devm_kzalloc(dev, struct_size(sunxi_nand, sels, nsels), GFP_KERNEL); - if (!sunxi_nand) { - dev_err(dev, "could not allocate chip\n"); + if (!sunxi_nand) return -ENOMEM; - } sunxi_nand->nsels = nsels; diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 3131fae0c7152a..446ba8d43fbca9 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c 
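With "cmdlinepart" and "ofpart" now tried ahead of "qcomsmem" in the qcom_nandc parser list above, partitions passed on the kernel command line or described in a fixed-partitions device-tree node take precedence over the SMEM partition table. A hypothetical command-line layout (the mtd-id and partition names are placeholders only) could look like:

	mtdparts=<mtd-id>:1M(sbl)ro,2M(mibib),-(ubi)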
@@ -138,20 +138,12 @@ int spinand_select_target(struct spinand_device *spinand, unsigned int target) return 0; } -static int spinand_init_cfg_cache(struct spinand_device *spinand) +static int spinand_read_cfg(struct spinand_device *spinand) { struct nand_device *nand = spinand_to_nand(spinand); - struct device *dev = &spinand->spimem->spi->dev; unsigned int target; int ret; - spinand->cfg_cache = devm_kcalloc(dev, - nand->memorg.ntargets, - sizeof(*spinand->cfg_cache), - GFP_KERNEL); - if (!spinand->cfg_cache) - return -ENOMEM; - for (target = 0; target < nand->memorg.ntargets; target++) { ret = spinand_select_target(spinand, target); if (ret) @@ -170,6 +162,21 @@ static int spinand_init_cfg_cache(struct spinand_device *spinand) return 0; } +static int spinand_init_cfg_cache(struct spinand_device *spinand) +{ + struct nand_device *nand = spinand_to_nand(spinand); + struct device *dev = &spinand->spimem->spi->dev; + + spinand->cfg_cache = devm_kcalloc(dev, + nand->memorg.ntargets, + sizeof(*spinand->cfg_cache), + GFP_KERNEL); + if (!spinand->cfg_cache) + return -ENOMEM; + + return 0; +} + static int spinand_init_quad_enable(struct spinand_device *spinand) { bool enable = false; @@ -290,6 +297,8 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand, { struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv; struct spinand_device *spinand = nand_to_spinand(nand); + struct mtd_info *mtd = spinand_to_mtd(spinand); + int ret; if (req->mode == MTD_OPS_RAW) return 0; @@ -299,7 +308,13 @@ static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand, return 0; /* Finish a page write: check the status, report errors/bitflips */ - return spinand_check_ecc_status(spinand, engine_conf->status); + ret = spinand_check_ecc_status(spinand, engine_conf->status); + if (ret == -EBADMSG) + mtd->ecc_stats.failed++; + else if (ret > 0) + mtd->ecc_stats.corrected += ret; + + return ret; } static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = { @@ -635,13 +650,10 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from, if (ret < 0 && ret != -EBADMSG) break; - if (ret == -EBADMSG) { + if (ret == -EBADMSG) ecc_failed = true; - mtd->ecc_stats.failed++; - } else { - mtd->ecc_stats.corrected += ret; + else max_bitflips = max_t(unsigned int, max_bitflips, ret); - } ret = 0; ops->retlen += iter.req.datalen; @@ -1093,12 +1105,71 @@ static int spinand_detect(struct spinand_device *spinand) return 0; } +static int spinand_init_flash(struct spinand_device *spinand) +{ + struct device *dev = &spinand->spimem->spi->dev; + struct nand_device *nand = spinand_to_nand(spinand); + int ret, i; + + ret = spinand_read_cfg(spinand); + if (ret) + return ret; + + ret = spinand_init_quad_enable(spinand); + if (ret) + return ret; + + ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0); + if (ret) + return ret; + + ret = spinand_manufacturer_init(spinand); + if (ret) { + dev_err(dev, + "Failed to initialize the SPI NAND chip (err = %d)\n", + ret); + return ret; + } + + /* After power up, all blocks are locked, so unlock them here. 
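	 * Most parts power on with the block-protection bits set in the
	 * block lock feature register, so BL_ALL_UNLOCKED has to be written
	 * before the first erase or program operation can succeed.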
*/ + for (i = 0; i < nand->memorg.ntargets; i++) { + ret = spinand_select_target(spinand, i); + if (ret) + break; + + ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); + if (ret) + break; + } + + if (ret) + spinand_manufacturer_cleanup(spinand); + + return ret; +} + +static void spinand_mtd_resume(struct mtd_info *mtd) +{ + struct spinand_device *spinand = mtd_to_spinand(mtd); + int ret; + + ret = spinand_reset_op(spinand); + if (ret) + return; + + ret = spinand_init_flash(spinand); + if (ret) + return; + + spinand_ecc_enable(spinand, false); +} + static int spinand_init(struct spinand_device *spinand) { struct device *dev = &spinand->spimem->spi->dev; struct mtd_info *mtd = spinand_to_mtd(spinand); struct nand_device *nand = mtd_to_nanddev(mtd); - int ret, i; + int ret; /* * We need a scratch buffer because the spi_mem interface requires that @@ -1131,22 +1202,10 @@ static int spinand_init(struct spinand_device *spinand) if (ret) goto err_free_bufs; - ret = spinand_init_quad_enable(spinand); + ret = spinand_init_flash(spinand); if (ret) goto err_free_bufs; - ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0); - if (ret) - goto err_free_bufs; - - ret = spinand_manufacturer_init(spinand); - if (ret) { - dev_err(dev, - "Failed to initialize the SPI NAND chip (err = %d)\n", - ret); - goto err_free_bufs; - } - ret = spinand_create_dirmaps(spinand); if (ret) { dev_err(dev, @@ -1155,17 +1214,6 @@ static int spinand_init(struct spinand_device *spinand) goto err_manuf_cleanup; } - /* After power up, all blocks are locked, so unlock them here. */ - for (i = 0; i < nand->memorg.ntargets; i++) { - ret = spinand_select_target(spinand, i); - if (ret) - goto err_manuf_cleanup; - - ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); - if (ret) - goto err_manuf_cleanup; - } - ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); if (ret) goto err_manuf_cleanup; @@ -1186,6 +1234,7 @@ static int spinand_init(struct spinand_device *spinand) mtd->_block_isreserved = spinand_mtd_block_isreserved; mtd->_erase = spinand_mtd_erase; mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks; + mtd->_resume = spinand_mtd_resume; if (nand->ecc.engine) { ret = mtd_ooblayout_count_freebytes(mtd); diff --git a/drivers/mtd/nand/spi/macronix.c b/drivers/mtd/nand/spi/macronix.c index 6701aaa21a49df..a9890350db0293 100644 --- a/drivers/mtd/nand/spi/macronix.c +++ b/drivers/mtd/nand/spi/macronix.c @@ -186,6 +186,118 @@ static const struct spinand_info macronix_spinand_table[] = { 0 /*SPINAND_HAS_QE_BIT*/, SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, mx35lf1ge4ab_ecc_get_status)), + + SPINAND_INFO("MX35LF2G14AC", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x20), + NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), + NAND_ECCREQ(4, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35UF4G24AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb5), + NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 2, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35UF4GE4AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xb7), + NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + 
&update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35UF2G14AC", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa0), + NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 2, 1, 1), + NAND_ECCREQ(4, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35UF2G24AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa4), + NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35UF2GE4AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa6), + NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35UF2GE4AC", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xa2), + NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1), + NAND_ECCREQ(4, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35UF1G14AC", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x90), + NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(4, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35UF1G24AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x94), + NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35UF1GE4AD", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x96), + NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(8, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + SPINAND_INFO("MX35UF1GE4AC", + SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x92), + NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), + NAND_ECCREQ(4, 512), + SPINAND_INFO_OP_VARIANTS(&read_cache_variants, + &write_cache_variants, + &update_cache_variants), + SPINAND_HAS_QE_BIT, + SPINAND_ECCINFO(&mx35lfxge4ab_ooblayout, + mx35lf1ge4ab_ecc_get_status)), + }; static const struct spinand_manufacturer_ops macronix_spinand_manuf_ops = { diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index bcd0094f172dcc..913db0dd6a8deb 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c @@ -619,7 +619,6 @@ static inline u16 NFTL_findwriteunit(struct NFTLrecord *nftl, unsigned block) return BLOCK_NIL; } //printk("Restarting scan\n"); - lastEUN = BLOCK_NIL; continue; } diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c index 444a77bb769213..75e86ed3e6789b 100644 --- 
a/drivers/mtd/nftlmount.c +++ b/drivers/mtd/nftlmount.c @@ -188,17 +188,14 @@ device is already correct. /* memory alloc */ nftl->EUNtable = kmalloc_array(nftl->nb_blocks, sizeof(u16), GFP_KERNEL); - if (!nftl->EUNtable) { - printk(KERN_NOTICE "NFTL: allocation of EUNtable failed\n"); + if (!nftl->EUNtable) return -ENOMEM; - } nftl->ReplUnitTable = kmalloc_array(nftl->nb_blocks, sizeof(u16), GFP_KERNEL); if (!nftl->ReplUnitTable) { kfree(nftl->EUNtable); - printk(KERN_NOTICE "NFTL: allocation of ReplUnitTable failed\n"); return -ENOMEM; } @@ -269,7 +266,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int buf = kmalloc(SECTORSIZE + mtd->oobsize, GFP_KERNEL); if (!buf) - return -1; + return -ENOMEM; ret = -1; for (i = 0; i < len; i += SECTORSIZE) { diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig index 9babe678c41b08..337ea8b9a4c3b6 100644 --- a/drivers/mtd/parsers/Kconfig +++ b/drivers/mtd/parsers/Kconfig @@ -115,7 +115,7 @@ config MTD_AFS_PARTS config MTD_PARSER_TRX tristate "Parser for TRX format partitions" - depends on MTD && (BCM47XX || ARCH_BCM_5301X || COMPILE_TEST) + depends on MTD && (BCM47XX || ARCH_BCM_5301X || ARCH_MEDIATEK || COMPILE_TEST) help TRX is a firmware format used by Broadcom on their devices. It may contain up to 3/4 partitions (depending on the version). diff --git a/drivers/mtd/parsers/parser_trx.c b/drivers/mtd/parsers/parser_trx.c index 8541182134d442..4814cf218e178f 100644 --- a/drivers/mtd/parsers/parser_trx.c +++ b/drivers/mtd/parsers/parser_trx.c @@ -51,13 +51,20 @@ static int parser_trx_parse(struct mtd_info *mtd, const struct mtd_partition **pparts, struct mtd_part_parser_data *data) { + struct device_node *np = mtd_get_of_node(mtd); struct mtd_partition *parts; struct mtd_partition *part; struct trx_header trx; size_t bytes_read; uint8_t curr_part = 0, i = 0; + uint32_t trx_magic = TRX_MAGIC; int err; + /* Get different magic from device tree if specified */ + err = of_property_read_u32(np, "brcm,trx-magic", &trx_magic); + if (err != 0 && err != -EINVAL) + pr_err("failed to parse \"brcm,trx-magic\" DT attribute, using default: %d\n", err); + parts = kcalloc(TRX_PARSER_MAX_PARTS, sizeof(struct mtd_partition), GFP_KERNEL); if (!parts) @@ -70,7 +77,7 @@ static int parser_trx_parse(struct mtd_info *mtd, return err; } - if (trx.magic != TRX_MAGIC) { + if (trx.magic != trx_magic) { kfree(parts); return -ENOENT; } diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c index d9083308f6ba61..06a818cd2433f1 100644 --- a/drivers/mtd/parsers/qcomsmempart.c +++ b/drivers/mtd/parsers/qcomsmempart.c @@ -159,6 +159,15 @@ static int parse_qcomsmem_part(struct mtd_info *mtd, return ret; } +static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts, + int nr_parts) +{ + int i; + + for (i = 0; i < nr_parts; i++) + kfree(pparts[i].name); +} + static const struct of_device_id qcomsmem_of_match_table[] = { { .compatible = "qcom,smem-part" }, {}, @@ -167,6 +176,7 @@ MODULE_DEVICE_TABLE(of, qcomsmem_of_match_table); static struct mtd_part_parser mtd_parser_qcomsmem = { .parse_fn = parse_qcomsmem_part, + .cleanup = parse_qcomsmem_cleanup, .name = "qcomsmem", .of_match_table = qcomsmem_of_match_table, }; diff --git a/drivers/mtd/parsers/redboot.c b/drivers/mtd/parsers/redboot.c index 91146bdc471321..feb44a573d447a 100644 --- a/drivers/mtd/parsers/redboot.c +++ b/drivers/mtd/parsers/redboot.c @@ -17,15 +17,15 @@ #include struct fis_image_desc { - unsigned char name[16]; // Null terminated 
name - uint32_t flash_base; // Address within FLASH of image - uint32_t mem_base; // Address in memory where it executes - uint32_t size; // Length of image - uint32_t entry_point; // Execution entry point - uint32_t data_length; // Length of actual data - unsigned char _pad[256-(16+7*sizeof(uint32_t))]; - uint32_t desc_cksum; // Checksum over image descriptor - uint32_t file_cksum; // Checksum over image data + unsigned char name[16]; // Null terminated name + u32 flash_base; // Address within FLASH of image + u32 mem_base; // Address in memory where it executes + u32 size; // Length of image + u32 entry_point; // Execution entry point + u32 data_length; // Length of actual data + unsigned char _pad[256 - (16 + 7 * sizeof(u32))]; + u32 desc_cksum; // Checksum over image descriptor + u32 file_cksum; // Checksum over image data }; struct fis_list { @@ -45,6 +45,7 @@ static inline int redboot_checksum(struct fis_image_desc *img) static void parse_redboot_of(struct mtd_info *master) { struct device_node *np; + struct device_node *npart; u32 dirblock; int ret; @@ -52,7 +53,11 @@ static void parse_redboot_of(struct mtd_info *master) if (!np) return; - ret = of_property_read_u32(np, "fis-index-block", &dirblock); + npart = of_get_child_by_name(np, "partitions"); + if (!npart) + return; + + ret = of_property_read_u32(npart, "fis-index-block", &dirblock); if (ret) return; @@ -85,12 +90,12 @@ static int parse_redboot_partitions(struct mtd_info *master, parse_redboot_of(master); - if ( directory < 0 ) { + if (directory < 0) { offset = master->size + directory * master->erasesize; while (mtd_block_isbad(master, offset)) { if (!offset) { - nogood: - printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n"); +nogood: + pr_notice("Failed to find a non-bad block to check for RedBoot partition table\n"); return -EIO; } offset -= master->erasesize; @@ -108,8 +113,8 @@ static int parse_redboot_partitions(struct mtd_info *master, if (!buf) return -ENOMEM; - printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n", - master->name, offset); + pr_notice("Searching for RedBoot partition table in %s at offset 0x%lx\n", + master->name, offset); ret = mtd_read(master, offset, master->erasesize, &retlen, (void *)buf); @@ -145,14 +150,13 @@ static int parse_redboot_partitions(struct mtd_info *master, && swab32(buf[i].size) < master->erasesize)) { int j; /* Update numslots based on actual FIS directory size */ - numslots = swab32(buf[i].size) / sizeof (struct fis_image_desc); + numslots = swab32(buf[i].size) / sizeof(struct fis_image_desc); for (j = 0; j < numslots; ++j) { - /* A single 0xff denotes a deleted entry. * Two of them in a row is the end of the table. 
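				 * (0xff is simply the erased-flash value: slots in the
				 * FIS directory that were never written still read back
				 * as all-ones.)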
*/ if (buf[j].name[0] == 0xff) { - if (buf[j].name[1] == 0xff) { + if (buf[j].name[1] == 0xff) { break; } else { continue; @@ -179,8 +183,8 @@ static int parse_redboot_partitions(struct mtd_info *master, } if (i == numslots) { /* Didn't find it */ - printk(KERN_NOTICE "No RedBoot partition table detected in %s\n", - master->name); + pr_notice("No RedBoot partition table detected in %s\n", + master->name); ret = 0; goto out; } @@ -199,7 +203,7 @@ static int parse_redboot_partitions(struct mtd_info *master, break; new_fl = kmalloc(sizeof(struct fis_list), GFP_KERNEL); - namelen += strlen(buf[i].name)+1; + namelen += strlen(buf[i].name) + 1; if (!new_fl) { ret = -ENOMEM; goto out; @@ -208,13 +212,13 @@ static int parse_redboot_partitions(struct mtd_info *master, if (data && data->origin) buf[i].flash_base -= data->origin; else - buf[i].flash_base &= master->size-1; + buf[i].flash_base &= master->size - 1; /* I'm sure the JFFS2 code has done me permanent damage. * I now think the following is _normal_ */ prev = &fl; - while(*prev && (*prev)->img->flash_base < new_fl->img->flash_base) + while (*prev && (*prev)->img->flash_base < new_fl->img->flash_base) prev = &(*prev)->next; new_fl->next = *prev; *prev = new_fl; @@ -234,7 +238,7 @@ static int parse_redboot_partitions(struct mtd_info *master, } } #endif - parts = kzalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL); + parts = kzalloc(sizeof(*parts) * nrparts + nulllen + namelen, GFP_KERNEL); if (!parts) { ret = -ENOMEM; @@ -243,23 +247,22 @@ static int parse_redboot_partitions(struct mtd_info *master, nullname = (char *)&parts[nrparts]; #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED - if (nulllen > 0) { + if (nulllen > 0) strcpy(nullname, nullstring); - } #endif names = nullname + nulllen; - i=0; + i = 0; #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED if (fl->img->flash_base) { - parts[0].name = nullname; - parts[0].size = fl->img->flash_base; - parts[0].offset = 0; + parts[0].name = nullname; + parts[0].size = fl->img->flash_base; + parts[0].offset = 0; i++; } #endif - for ( ; iimg->size; parts[i].offset = fl->img->flash_base; parts[i].name = names; @@ -267,17 +270,17 @@ static int parse_redboot_partitions(struct mtd_info *master, strcpy(names, fl->img->name); #ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY if (!memcmp(names, "RedBoot", 8) || - !memcmp(names, "RedBoot config", 15) || - !memcmp(names, "FIS directory", 14)) { + !memcmp(names, "RedBoot config", 15) || + !memcmp(names, "FIS directory", 14)) { parts[i].mask_flags = MTD_WRITEABLE; } #endif - names += strlen(names)+1; + names += strlen(names) + 1; #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED - if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) { + if (fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) { i++; - parts[i].offset = parts[i-1].size + parts[i-1].offset; + parts[i].offset = parts[i - 1].size + parts[i - 1].offset; parts[i].size = fl->next->img->flash_base - parts[i].offset; parts[i].name = nullname; } @@ -291,6 +294,7 @@ static int parse_redboot_partitions(struct mtd_info *master, out: while (fl) { struct fis_list *old = fl; + fl = fl->next; kfree(old); } diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c index cce3bf6f99b4ad..6e0d5ce9b01087 100644 --- a/drivers/mtd/rfd_ftl.c +++ b/drivers/mtd/rfd_ftl.c @@ -192,11 +192,8 @@ static int scan_header(struct partition *part) part->sector_map = vmalloc(array_size(sizeof(u_long), part->sector_count)); - if (!part->sector_map) { - 
printk(KERN_ERR PREFIX "'%s': unable to allocate memory for " - "sector map", part->mbd.mtd->name); + if (!part->sector_map) goto err; - } for (i=0; isector_count; i++) part->sector_map[i] = -1; diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 4d1ae25507ab38..0cff2cda1b5a08 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c @@ -265,7 +265,8 @@ static int sm_read_sector(struct sm_ftl *ftl, again: if (try++) { /* Avoid infinite recursion on CIS reads, sm_recheck_media - won't help anyway */ + * won't help anyway + */ if (zone == 0 && block == ftl->cis_block && boffset == ftl->cis_boffset) return ret; @@ -276,7 +277,8 @@ static int sm_read_sector(struct sm_ftl *ftl, } /* Unfortunately, oob read will _always_ succeed, - despite card removal..... */ + * despite card removal..... + */ ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops); /* Test for unknown errors */ @@ -411,9 +413,10 @@ static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf, /* If write fails. try to erase the block */ /* This is safe, because we never write in blocks - that contain valuable data. - This is intended to repair block that are marked - as erased, but that isn't fully erased*/ + * that contain valuable data. + * This is intended to repair block that are marked + * as erased, but that isn't fully erased + */ if (sm_erase_block(ftl, zone, block, 0)) return -EIO; @@ -448,7 +451,8 @@ static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block) /* We aren't checking the return value, because we don't care */ /* This also fails on fake xD cards, but I guess these won't expose - any bad blocks till fail completely */ + * any bad blocks till fail completely + */ for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE) sm_write_sector(ftl, zone, block, boffset, NULL, &oob); } @@ -505,7 +509,8 @@ static int sm_check_block(struct sm_ftl *ftl, int zone, int block) /* First just check that block doesn't look fishy */ /* Only blocks that are valid or are sliced in two parts, are - accepted */ + * accepted + */ for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE) { @@ -554,7 +559,8 @@ static const uint8_t cis_signature[] = { 0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20 }; /* Find out media parameters. - * This ideally has to be based on nand id, but for now device size is enough */ + * This ideally has to be based on nand id, but for now device size is enough + */ static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd) { int i; @@ -607,7 +613,8 @@ static int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd) } /* Minimum xD size is 16MiB. Also, all xD cards have standard zone - sizes. SmartMedia cards exist up to 128 MiB and have same layout*/ + * sizes. SmartMedia cards exist up to 128 MiB and have same layout + */ if (size_in_megs >= 16) { ftl->zone_count = size_in_megs / 16; ftl->zone_size = 1024; @@ -782,7 +789,8 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num) } /* Test to see if block is erased. 
It is enough to test - first sector, because erase happens in one shot */ + * first sector, because erase happens in one shot + */ if (sm_block_erased(&oob)) { kfifo_in(&zone->free_sectors, (unsigned char *)&block, 2); @@ -792,7 +800,8 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num) /* If block is marked as bad, skip it */ /* This assumes we can trust first sector*/ /* However the way the block valid status is defined, ensures - very low probability of failure here */ + * very low probability of failure here + */ if (!sm_block_valid(&oob)) { dbg("PH %04d <-> ", block); continue; @@ -803,7 +812,8 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num) /* Invalid LBA means that block is damaged. */ /* We can try to erase it, or mark it as bad, but - lets leave that to recovery application */ + * lets leave that to recovery application + */ if (lba == -2 || lba >= ftl->max_lba) { dbg("PH %04d <-> LBA %04d(bad)", block, lba); continue; @@ -811,7 +821,8 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num) /* If there is no collision, - just put the sector in the FTL table */ + * just put the sector in the FTL table + */ if (zone->lba_to_phys_table[lba] < 0) { dbg_verbose("PH %04d <-> LBA %04d", block, lba); zone->lba_to_phys_table[lba] = block; @@ -834,9 +845,9 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num) } /* If both blocks are valid and share same LBA, it means that - they hold different versions of same data. It not - known which is more recent, thus just erase one of them - */ + * they hold different versions of same data. It not + * known which is more recent, thus just erase one of them + */ sm_printk("both blocks are valid, erasing the later"); sm_erase_block(ftl, zone_num, block, 1); } @@ -845,7 +856,8 @@ static int sm_init_zone(struct sm_ftl *ftl, int zone_num) zone->initialized = 1; /* No free sectors, means that the zone is heavily damaged, write won't - work, but it can still can be (partially) read */ + * work, but it can still can be (partially) read + */ if (!kfifo_len(&zone->free_sectors)) { sm_printk("no free blocks in zone %d", zone_num); return 0; @@ -952,8 +964,9 @@ static int sm_cache_flush(struct sm_ftl *ftl) /* If there are no spare blocks, */ /* we could still continue by erasing/writing the current block, - but for such worn out media it doesn't worth the trouble, - and the dangers */ + * but for such worn out media it doesn't worth the trouble, + * and the dangers + */ if (kfifo_out(&zone->free_sectors, (unsigned char *)&write_sector, 2) != 2) { dbg("no free sectors for write!"); diff --git a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile index 136f245c91dcec..6b904e43937289 100644 --- a/drivers/mtd/spi-nor/Makefile +++ b/drivers/mtd/spi-nor/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -spi-nor-objs := core.o sfdp.o swp.o otp.o +spi-nor-objs := core.o sfdp.o swp.o otp.o sysfs.o spi-nor-objs += atmel.o spi-nor-objs += catalyst.o spi-nor-objs += eon.o diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c index 825610a2e9dcad..1bc53b8bb88a9d 100644 --- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c @@ -74,6 +74,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0x54a4), (unsigned 
long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, diff --git a/drivers/mtd/spi-nor/controllers/nxp-spifi.c b/drivers/mtd/spi-nor/controllers/nxp-spifi.c index 5703e831398030..2635c80231bbd9 100644 --- a/drivers/mtd/spi-nor/controllers/nxp-spifi.c +++ b/drivers/mtd/spi-nor/controllers/nxp-spifi.c @@ -326,7 +326,7 @@ static int nxp_spifi_setup_flash(struct nxp_spifi *spifi, ctrl |= SPIFI_CTRL_DUAL; } - switch (mode & (SPI_CPHA | SPI_CPOL)) { + switch (mode & SPI_MODE_X_MASK) { case SPI_MODE_0: ctrl &= ~SPIFI_CTRL_MODE3; break; diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index bd2c7717eb1029..cc08bd707378f9 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -1318,7 +1318,7 @@ static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr) /* * Initiate the erasure of a single sector */ -static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) +int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) { int i; @@ -1411,9 +1411,7 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map, continue; spi_nor_div_by_erase_size(erase, addr, &rem); - if (rem) - continue; - else + if (!rem) return erase; } @@ -2839,6 +2837,21 @@ static int spi_nor_init(struct spi_nor *nor) return 0; } +/** + * spi_nor_soft_reset() - Perform a software reset + * @nor: pointer to 'struct spi_nor' + * + * Performs a "Soft Reset and Enter Default Protocol Mode" sequence which resets + * the device to its power-on-reset state. This is useful when the software has + * made some changes to device (volatile) registers and needs to reset it before + * shutting down, for example. + * + * Not every flash supports this sequence. The same set of opcodes might be used + * for some other operation on a flash that does not support this. Support for + * this sequence can be discovered via SFDP in the BFPT table. + * + * Return: 0 on success, -errno otherwise. + */ static void spi_nor_soft_reset(struct spi_nor *nor) { struct spi_mem_op op; @@ -3444,6 +3457,7 @@ static struct spi_mem_driver spi_nor_driver = { .driver = { .name = "spi-nor", .of_match_table = spi_nor_of_table, + .dev_groups = spi_nor_sysfs_groups, }, .id_table = spi_nor_dev_ids, }, diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index 28a2e0be97a3b6..3348e1dd144526 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -207,6 +207,7 @@ struct spi_nor_otp_organization { * @read: read from the SPI NOR OTP area. * @write: write to the SPI NOR OTP area. * @lock: lock an OTP region. + * @erase: erase an OTP region. * @is_locked: check if an OTP region of the SPI NOR is locked. */ struct spi_nor_otp_ops { @@ -214,6 +215,7 @@ struct spi_nor_otp_ops { int (*write)(struct spi_nor *nor, loff_t addr, size_t len, const u8 *buf); int (*lock)(struct spi_nor *nor, unsigned int region); + int (*erase)(struct spi_nor *nor, loff_t addr); int (*is_locked)(struct spi_nor *nor, unsigned int region); }; @@ -459,6 +461,16 @@ struct spi_nor_manufacturer { const struct spi_nor_fixups *fixups; }; +/** + * struct sfdp - SFDP data + * @num_dwords: number of entries in the dwords array + * @dwords: array of double words of the SFDP data + */ +struct sfdp { + size_t num_dwords; + u32 *dwords; +}; + /* Manufacturer drivers. 
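 * Each entry below supplies that vendor's flash_info table and optional
 * fixups; the core matches the JEDEC ID read from the device against
 * these tables at probe time.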
*/ extern const struct spi_nor_manufacturer spi_nor_atmel; extern const struct spi_nor_manufacturer spi_nor_catalyst; @@ -478,6 +490,8 @@ extern const struct spi_nor_manufacturer spi_nor_winbond; extern const struct spi_nor_manufacturer spi_nor_xilinx; extern const struct spi_nor_manufacturer spi_nor_xmc; +extern const struct attribute_group *spi_nor_sysfs_groups[]; + void spi_nor_spimem_setup_op(const struct spi_nor *nor, struct spi_mem_op *op, const enum spi_nor_protocol proto); @@ -503,10 +517,12 @@ ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf); ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len, const u8 *buf); +int spi_nor_erase_sector(struct spi_nor *nor, u32 addr); int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf); int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len, const u8 *buf); +int spi_nor_otp_erase_secr(struct spi_nor *nor, loff_t addr); int spi_nor_otp_lock_sr2(struct spi_nor *nor, unsigned int region); int spi_nor_otp_is_locked_sr2(struct spi_nor *nor, unsigned int region); diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c index 42c2cf31702e9d..27498ed0cc0dc2 100644 --- a/drivers/mtd/spi-nor/macronix.c +++ b/drivers/mtd/spi-nor/macronix.c @@ -49,7 +49,8 @@ static const struct flash_info macronix_parts[] = { { "mx25u4035", INFO(0xc22533, 0, 64 * 1024, 8, SECT_4K) }, { "mx25u8035", INFO(0xc22534, 0, 64 * 1024, 16, SECT_4K) }, { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, - { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, SECT_4K) }, + { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, SECT_4K | + SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP) }, { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | @@ -72,7 +73,7 @@ static const struct flash_info macronix_parts[] = { SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, - { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, + { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, diff --git a/drivers/mtd/spi-nor/otp.c b/drivers/mtd/spi-nor/otp.c index fcf38d26034500..983e40b19134b5 100644 --- a/drivers/mtd/spi-nor/otp.c +++ b/drivers/mtd/spi-nor/otp.c @@ -15,14 +15,21 @@ #define spi_nor_otp_n_regions(nor) ((nor)->params->otp.org->n_regions) /** - * spi_nor_otp_read_secr() - read OTP data + * spi_nor_otp_read_secr() - read security register * @nor: pointer to 'struct spi_nor' - * @from: offset to read from + * @addr: offset to read from * @len: number of bytes to read * @buf: pointer to dst buffer * - * Read OTP data from one region by using the SPINOR_OP_RSECR commands. This - * method is used on GigaDevice and Winbond flashes. + * Read a security register by using the SPINOR_OP_RSECR commands. + * + * In Winbond/GigaDevice datasheets the term "security register" stands for + * an one-time-programmable memory area, consisting of multiple bytes (usually + * 256). Thus one "security register" maps to one OTP region. + * + * This method is used on GigaDevice and Winbond flashes. + * + * Please note, the read must not span multiple registers. 
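 * The MTD OTP wrappers in otp.c are expected to split accesses at region
 * boundaries before calling this helper, so the constraint holds for
 * requests coming through the MTD layer.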
* * Return: number of bytes read successfully, -errno otherwise */ @@ -40,7 +47,6 @@ int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf) rdesc = nor->dirmap.rdesc; nor->read_opcode = SPINOR_OP_RSECR; - nor->addr_width = 3; nor->read_dummy = 8; nor->read_proto = SNOR_PROTO_1_1_1; nor->dirmap.rdesc = NULL; @@ -57,16 +63,20 @@ int spi_nor_otp_read_secr(struct spi_nor *nor, loff_t addr, size_t len, u8 *buf) } /** - * spi_nor_otp_write_secr() - write OTP data + * spi_nor_otp_write_secr() - write security register * @nor: pointer to 'struct spi_nor' - * @to: offset to write to + * @addr: offset to write to * @len: number of bytes to write * @buf: pointer to src buffer * - * Write OTP data to one region by using the SPINOR_OP_PSECR commands. This - * method is used on GigaDevice and Winbond flashes. + * Write a security register by using the SPINOR_OP_PSECR commands. + * + * For more information on the term "security register", see the documentation + * of spi_nor_otp_read_secr(). * - * Please note, the write must not span multiple OTP regions. + * This method is used on GigaDevice and Winbond flashes. + * + * Please note, the write must not span multiple registers. * * Return: number of bytes written successfully, -errno otherwise */ @@ -84,13 +94,12 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len, wdesc = nor->dirmap.wdesc; nor->program_opcode = SPINOR_OP_PSECR; - nor->addr_width = 3; nor->write_proto = SNOR_PROTO_1_1_1; nor->dirmap.wdesc = NULL; /* * We only support a write to one single page. For now all winbond - * flashes only have one page per OTP region. + * flashes only have one page per security register. */ ret = spi_nor_write_enable(nor); if (ret) @@ -111,6 +120,38 @@ int spi_nor_otp_write_secr(struct spi_nor *nor, loff_t addr, size_t len, return ret ?: written; } +/** + * spi_nor_otp_erase_secr() - erase a security register + * @nor: pointer to 'struct spi_nor' + * @addr: offset of the security register to be erased + * + * Erase a security register by using the SPINOR_OP_ESECR command. + * + * For more information on the term "security register", see the documentation + * of spi_nor_otp_read_secr(). + * + * This method is used on GigaDevice and Winbond flashes. + * + * Return: 0 on success, -errno otherwise + */ +int spi_nor_otp_erase_secr(struct spi_nor *nor, loff_t addr) +{ + u8 erase_opcode = nor->erase_opcode; + int ret; + + ret = spi_nor_write_enable(nor); + if (ret) + return ret; + + nor->erase_opcode = SPINOR_OP_ESECR; + ret = spi_nor_erase_sector(nor, addr); + nor->erase_opcode = erase_opcode; + if (ret) + return ret; + + return spi_nor_wait_till_ready(nor); +} + static int spi_nor_otp_lock_bit_cr(unsigned int region) { static const int lock_bits[] = { SR2_LB1, SR2_LB2, SR2_LB3 }; @@ -240,6 +281,29 @@ static int spi_nor_mtd_otp_info(struct mtd_info *mtd, size_t len, return ret; } +static int spi_nor_mtd_otp_range_is_locked(struct spi_nor *nor, loff_t ofs, + size_t len) +{ + const struct spi_nor_otp_ops *ops = nor->params->otp.ops; + unsigned int region; + int locked; + + /* + * If any of the affected OTP regions are locked the entire range is + * considered locked. 
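+	 * The callers then refuse the whole write or erase with -EROFS, i.e.
+	 * an access touching even a single byte of a locked region is
+	 * rejected in full.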
+ */ + for (region = spi_nor_otp_offset_to_region(nor, ofs); + region <= spi_nor_otp_offset_to_region(nor, ofs + len - 1); + region++) { + locked = ops->is_locked(nor, region); + /* take the branch it is locked or in case of an error */ + if (locked) + return locked; + } + + return 0; +} + static int spi_nor_mtd_otp_read_write(struct mtd_info *mtd, loff_t ofs, size_t total_len, size_t *retlen, const u8 *buf, bool is_write) @@ -255,14 +319,26 @@ static int spi_nor_mtd_otp_read_write(struct mtd_info *mtd, loff_t ofs, if (ofs < 0 || ofs >= spi_nor_otp_size(nor)) return 0; + /* don't access beyond the end */ + total_len = min_t(size_t, total_len, spi_nor_otp_size(nor) - ofs); + + if (!total_len) + return 0; + ret = spi_nor_lock_and_prep(nor); if (ret) return ret; - /* don't access beyond the end */ - total_len = min_t(size_t, total_len, spi_nor_otp_size(nor) - ofs); + if (is_write) { + ret = spi_nor_mtd_otp_range_is_locked(nor, ofs, total_len); + if (ret < 0) { + goto out; + } else if (ret) { + ret = -EROFS; + goto out; + } + } - *retlen = 0; while (total_len) { /* * The OTP regions are mapped into a contiguous area starting @@ -316,6 +392,59 @@ static int spi_nor_mtd_otp_write(struct mtd_info *mtd, loff_t to, size_t len, return spi_nor_mtd_otp_read_write(mtd, to, len, retlen, buf, true); } +static int spi_nor_mtd_otp_erase(struct mtd_info *mtd, loff_t from, size_t len) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + const struct spi_nor_otp_ops *ops = nor->params->otp.ops; + const size_t rlen = spi_nor_otp_region_len(nor); + unsigned int region; + loff_t rstart; + int ret; + + /* OTP erase is optional */ + if (!ops->erase) + return -EOPNOTSUPP; + + if (!len) + return 0; + + if (from < 0 || (from + len) > spi_nor_otp_size(nor)) + return -EINVAL; + + /* the user has to explicitly ask for whole regions */ + if (!IS_ALIGNED(len, rlen) || !IS_ALIGNED(from, rlen)) + return -EINVAL; + + ret = spi_nor_lock_and_prep(nor); + if (ret) + return ret; + + ret = spi_nor_mtd_otp_range_is_locked(nor, from, len); + if (ret < 0) { + goto out; + } else if (ret) { + ret = -EROFS; + goto out; + } + + while (len) { + region = spi_nor_otp_offset_to_region(nor, from); + rstart = spi_nor_otp_region_start(nor, region); + + ret = ops->erase(nor, rstart); + if (ret) + goto out; + + len -= rlen; + from += rlen; + } + +out: + spi_nor_unlock_and_unprep(nor); + + return ret; +} + static int spi_nor_mtd_otp_lock(struct mtd_info *mtd, loff_t from, size_t len) { struct spi_nor *nor = mtd_to_spi_nor(mtd); @@ -374,4 +503,5 @@ void spi_nor_otp_init(struct spi_nor *nor) mtd->_read_user_prot_reg = spi_nor_mtd_otp_read; mtd->_write_user_prot_reg = spi_nor_mtd_otp_write; mtd->_lock_user_prot_reg = spi_nor_mtd_otp_lock; + mtd->_erase_user_prot_reg = spi_nor_mtd_otp_erase; } diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index 23c28e91f698ea..c500c2118a5dbf 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -16,6 +16,7 @@ (((p)->parameter_table_pointer[2] << 16) | \ ((p)->parameter_table_pointer[1] << 8) | \ ((p)->parameter_table_pointer[0] << 0)) +#define SFDP_PARAM_HEADER_PARAM_LEN(p) ((p)->length * 4) #define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */ #define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */ @@ -1245,6 +1246,8 @@ int spi_nor_parse_sfdp(struct spi_nor *nor) struct sfdp_parameter_header *param_headers = NULL; struct sfdp_header header; struct device *dev = nor->dev; + struct sfdp *sfdp; + size_t sfdp_size; size_t psize; int i, err; @@ -1267,6 +1270,9 @@ int 
spi_nor_parse_sfdp(struct spi_nor *nor) bfpt_header->major != SFDP_JESD216_MAJOR) return -EINVAL; + sfdp_size = SFDP_PARAM_HEADER_PTP(bfpt_header) + + SFDP_PARAM_HEADER_PARAM_LEN(bfpt_header); + /* * Allocate memory then read all parameter headers with a single * Read SFDP command. These parameter headers will actually be parsed @@ -1293,6 +1299,58 @@ int spi_nor_parse_sfdp(struct spi_nor *nor) } } + /* + * Cache the complete SFDP data. It is not (easily) possible to fetch + * SFDP after probe time and we need it for the sysfs access. + */ + for (i = 0; i < header.nph; i++) { + param_header = ¶m_headers[i]; + sfdp_size = max_t(size_t, sfdp_size, + SFDP_PARAM_HEADER_PTP(param_header) + + SFDP_PARAM_HEADER_PARAM_LEN(param_header)); + } + + /* + * Limit the total size to a reasonable value to avoid allocating too + * much memory just of because the flash returned some insane values. + */ + if (sfdp_size > PAGE_SIZE) { + dev_dbg(dev, "SFDP data (%zu) too big, truncating\n", + sfdp_size); + sfdp_size = PAGE_SIZE; + } + + sfdp = devm_kzalloc(dev, sizeof(*sfdp), GFP_KERNEL); + if (!sfdp) { + err = -ENOMEM; + goto exit; + } + + /* + * The SFDP is organized in chunks of DWORDs. Thus, in theory, the + * sfdp_size should be a multiple of DWORDs. But in case a flash + * is not spec compliant, make sure that we have enough space to store + * the complete SFDP data. + */ + sfdp->num_dwords = DIV_ROUND_UP(sfdp_size, sizeof(*sfdp->dwords)); + sfdp->dwords = devm_kcalloc(dev, sfdp->num_dwords, + sizeof(*sfdp->dwords), GFP_KERNEL); + if (!sfdp->dwords) { + err = -ENOMEM; + devm_kfree(dev, sfdp); + goto exit; + } + + err = spi_nor_read_sfdp(nor, 0, sfdp_size, sfdp->dwords); + if (err < 0) { + dev_dbg(dev, "failed to read SFDP data\n"); + devm_kfree(dev, sfdp->dwords); + devm_kfree(dev, sfdp); + goto exit; + } + + nor->sfdp = sfdp; + /* * Check other parameter headers to get the latest revision of * the basic flash parameter table. 
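 */

The cached table is what the new "sfdp" binary sysfs attribute (see the sysfs.c file added below) exposes to userspace. A minimal userspace sketch (the spi0.0 device name is only a placeholder) reads it and checks the JESD216 "SFDP" signature:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Placeholder device name; adjust to the actual SPI device. */
		const char *path = "/sys/bus/spi/devices/spi0.0/spi-nor/sfdp";
		unsigned char buf[4096];
		ssize_t len;
		int fd;

		fd = open(path, O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		len = read(fd, buf, sizeof(buf));
		close(fd);
		if (len < 4) {
			fprintf(stderr, "short or failed read\n");
			return 1;
		}

		/* The SFDP header starts with the ASCII signature "SFDP". */
		if (memcmp(buf, "SFDP", 4) != 0) {
			fprintf(stderr, "unexpected SFDP signature\n");
			return 1;
		}

		printf("read %zd bytes of SFDP data\n", len);
		return 0;
	}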
diff --git a/drivers/mtd/spi-nor/sysfs.c b/drivers/mtd/spi-nor/sysfs.c new file mode 100644 index 00000000000000..9aec9d8a98ada8 --- /dev/null +++ b/drivers/mtd/spi-nor/sysfs.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +#include "core.h" + +static ssize_t manufacturer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct spi_device *spi = to_spi_device(dev); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + return sysfs_emit(buf, "%s\n", nor->manufacturer->name); +} +static DEVICE_ATTR_RO(manufacturer); + +static ssize_t partname_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct spi_device *spi = to_spi_device(dev); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + return sysfs_emit(buf, "%s\n", nor->info->name); +} +static DEVICE_ATTR_RO(partname); + +static ssize_t jedec_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct spi_device *spi = to_spi_device(dev); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + return sysfs_emit(buf, "%*phN\n", nor->info->id_len, nor->info->id); +} +static DEVICE_ATTR_RO(jedec_id); + +static struct attribute *spi_nor_sysfs_entries[] = { + &dev_attr_manufacturer.attr, + &dev_attr_partname.attr, + &dev_attr_jedec_id.attr, + NULL +}; + +static ssize_t sfdp_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct spi_device *spi = to_spi_device(kobj_to_dev(kobj)); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + struct sfdp *sfdp = nor->sfdp; + size_t sfdp_size = sfdp->num_dwords * sizeof(*sfdp->dwords); + + return memory_read_from_buffer(buf, count, &off, nor->sfdp->dwords, + sfdp_size); +} +static BIN_ATTR_RO(sfdp, 0); + +static struct bin_attribute *spi_nor_sysfs_bin_entries[] = { + &bin_attr_sfdp, + NULL +}; + +static umode_t spi_nor_sysfs_is_bin_visible(struct kobject *kobj, + struct bin_attribute *attr, int n) +{ + struct spi_device *spi = to_spi_device(kobj_to_dev(kobj)); + struct spi_mem *spimem = spi_get_drvdata(spi); + struct spi_nor *nor = spi_mem_get_drvdata(spimem); + + if (attr == &bin_attr_sfdp && nor->sfdp) + return 0444; + + return 0; +} + +static const struct attribute_group spi_nor_sysfs_group = { + .name = "spi-nor", + .is_bin_visible = spi_nor_sysfs_is_bin_visible, + .attrs = spi_nor_sysfs_entries, + .bin_attrs = spi_nor_sysfs_bin_entries, +}; + +const struct attribute_group *spi_nor_sysfs_groups[] = { + &spi_nor_sysfs_group, + NULL +}; diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c index 9a81c67a60c623..96573f61caf546 100644 --- a/drivers/mtd/spi-nor/winbond.c +++ b/drivers/mtd/spi-nor/winbond.c @@ -139,6 +139,7 @@ static int winbond_set_4byte_addr_mode(struct spi_nor *nor, bool enable) static const struct spi_nor_otp_ops winbond_otp_ops = { .read = spi_nor_otp_read_secr, .write = spi_nor_otp_write_secr, + .erase = spi_nor_otp_erase_secr, .lock = spi_nor_otp_lock_sr2, .is_locked = spi_nor_otp_is_locked_sr2, }; diff --git a/drivers/mtd/tests/oobtest.c b/drivers/mtd/tests/oobtest.c index c71daa89bfce0f..532997e10e299a 100644 --- a/drivers/mtd/tests/oobtest.c +++ b/drivers/mtd/tests/oobtest.c @@ -506,7 +506,6 @@ static int __init mtd_oobtest_init(void) err = mtd_write_oob(mtd, 
addr0, &ops); if (err) { pr_info("error occurred as expected\n"); - err = 0; } else { pr_err("error: can write past end of OOB\n"); errcnt += 1; @@ -529,7 +528,6 @@ static int __init mtd_oobtest_init(void) if (err) { pr_info("error occurred as expected\n"); - err = 0; } else { pr_err("error: can read past end of OOB\n"); errcnt += 1; @@ -553,7 +551,6 @@ static int __init mtd_oobtest_init(void) err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops); if (err) { pr_info("error occurred as expected\n"); - err = 0; } else { pr_err("error: wrote past end of device\n"); errcnt += 1; @@ -576,7 +573,6 @@ static int __init mtd_oobtest_init(void) if (err) { pr_info("error occurred as expected\n"); - err = 0; } else { pr_err("error: read past end of device\n"); errcnt += 1; @@ -600,7 +596,6 @@ static int __init mtd_oobtest_init(void) err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops); if (err) { pr_info("error occurred as expected\n"); - err = 0; } else { pr_err("error: wrote past end of device\n"); errcnt += 1; @@ -623,7 +618,6 @@ static int __init mtd_oobtest_init(void) if (err) { pr_info("error occurred as expected\n"); - err = 0; } else { pr_err("error: read past end of device\n"); errcnt += 1; @@ -701,6 +695,7 @@ static int __init mtd_oobtest_init(void) (long long)addr); errcnt += 1; if (errcnt > 1000) { + err = -EINVAL; pr_err("error: too many errors\n"); goto out; } diff --git a/drivers/mtd/tests/torturetest.c b/drivers/mtd/tests/torturetest.c index 6787ac5471a947..841689b4d86d24 100644 --- a/drivers/mtd/tests/torturetest.c +++ b/drivers/mtd/tests/torturetest.c @@ -230,8 +230,6 @@ static int __init tort_init(void) if (!bad_ebs) goto out_check_buf; - err = 0; - /* Initialize patterns */ memset(patt_FF, 0xFF, mtd->erasesize); for (i = 0; i < mtd->erasesize / pgsize; i++) { diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index bca671ff4e5460..62d363a399d395 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -789,7 +789,9 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) nvmem->reg_write = config->reg_write; nvmem->keepout = config->keepout; nvmem->nkeepout = config->nkeepout; - if (!config->no_of_node) + if (config->of_node) + nvmem->dev.of_node = config->of_node; + else if (!config->no_of_node) nvmem->dev.of_node = config->dev->of_node; switch (config->id) { diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index a89955f3cbc881..88227044fc86c9 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -380,6 +380,8 @@ struct mtd_info { int usecount; struct mtd_debug_info dbg; struct nvmem_device *nvmem; + struct nvmem_device *otp_user_nvmem; + struct nvmem_device *otp_factory_nvmem; /* * Parent device from the MTD partition point of view. 
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h index 339ac798568ed9..a7376f9beddfd6 100644 --- a/include/linux/mtd/onfi.h +++ b/include/linux/mtd/onfi.h @@ -11,6 +11,7 @@ #define __LINUX_MTD_ONFI_H #include +#include /* ONFI version bits */ #define ONFI_VERSION_1_0 BIT(1) @@ -24,17 +25,22 @@ #define ONFI_VERSION_4_0 BIT(9) /* ONFI features */ -#define ONFI_FEATURE_16_BIT_BUS (1 << 0) -#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) +#define ONFI_FEATURE_16_BIT_BUS BIT(0) +#define ONFI_FEATURE_NV_DDR BIT(5) +#define ONFI_FEATURE_EXT_PARAM_PAGE BIT(7) /* ONFI timing mode, used in both asynchronous and synchronous mode */ -#define ONFI_TIMING_MODE_0 (1 << 0) -#define ONFI_TIMING_MODE_1 (1 << 1) -#define ONFI_TIMING_MODE_2 (1 << 2) -#define ONFI_TIMING_MODE_3 (1 << 3) -#define ONFI_TIMING_MODE_4 (1 << 4) -#define ONFI_TIMING_MODE_5 (1 << 5) -#define ONFI_TIMING_MODE_UNKNOWN (1 << 6) +#define ONFI_DATA_INTERFACE_SDR 0 +#define ONFI_DATA_INTERFACE_NVDDR BIT(4) +#define ONFI_DATA_INTERFACE_NVDDR2 BIT(5) +#define ONFI_TIMING_MODE_0 BIT(0) +#define ONFI_TIMING_MODE_1 BIT(1) +#define ONFI_TIMING_MODE_2 BIT(2) +#define ONFI_TIMING_MODE_3 BIT(3) +#define ONFI_TIMING_MODE_4 BIT(4) +#define ONFI_TIMING_MODE_5 BIT(5) +#define ONFI_TIMING_MODE_UNKNOWN BIT(6) +#define ONFI_TIMING_MODE_PARAM(x) FIELD_GET(GENMASK(3, 0), (x)) /* ONFI feature number/address */ #define ONFI_FEATURE_NUMBER 256 @@ -49,7 +55,7 @@ #define ONFI_SUBFEATURE_PARAM_LEN 4 /* ONFI optional commands SET/GET FEATURES supported? */ -#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2) +#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2) struct nand_onfi_params { /* rev info and features block */ @@ -93,14 +99,15 @@ struct nand_onfi_params { /* electrical parameter block */ u8 io_pin_capacitance_max; - __le16 async_timing_mode; + __le16 sdr_timing_modes; __le16 program_cache_timing_mode; __le16 t_prog; __le16 t_bers; __le16 t_r; __le16 t_ccs; - __le16 src_sync_timing_mode; - u8 src_ssync_features; + u8 nvddr_timing_modes; + u8 nvddr2_timing_modes; + u8 nvddr_nvddr2_features; __le16 clk_pin_capacitance_typ; __le16 io_pin_capacitance_typ; __le16 input_pin_capacitance_typ; @@ -160,7 +167,9 @@ struct onfi_ext_param_page { * @tBERS: Block erase time * @tR: Page read time * @tCCS: Change column setup time - * @async_timing_mode: Supported asynchronous timing mode + * @fast_tCAD: Command/Address/Data slow or fast delay (NV-DDR only) + * @sdr_timing_modes: Supported asynchronous/SDR timing modes + * @nvddr_timing_modes: Supported source synchronous/NV-DDR timing modes * @vendor_revision: Vendor specific revision number * @vendor: Vendor specific data */ @@ -170,7 +179,9 @@ struct onfi_params { u16 tBERS; u16 tR; u16 tCCS; - u16 async_timing_mode; + bool fast_tCAD; + u16 sdr_timing_modes; + u16 nvddr_timing_modes; u16 vendor_revision; u8 vendor[88]; }; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 29df2f43dcb5a6..b2f9dd3cbd695c 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -24,6 +24,7 @@ #include struct nand_chip; +struct gpio_desc; /* The maximum number of NAND chips in an array */ #define NAND_MAX_CHIPS 8 @@ -385,8 +386,8 @@ struct nand_ecc_ctrl { * This struct defines the timing requirements of a SDR NAND chip. 
* These information can be found in every NAND datasheets and the timings * meaning are described in the ONFI specifications: - * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing - * Parameters) + * https://media-www.micron.com/-/media/client/onfi/specs/onfi_3_1_spec.pdf + * (chapter 4.15 Timing Parameters) * * All these timings are expressed in picoseconds. * @@ -471,12 +472,128 @@ struct nand_sdr_timings { u32 tWW_min; }; +/** + * struct nand_nvddr_timings - NV-DDR NAND chip timings + * + * This struct defines the timing requirements of a NV-DDR NAND data interface. + * These information can be found in every NAND datasheets and the timings + * meaning are described in the ONFI specifications: + * https://media-www.micron.com/-/media/client/onfi/specs/onfi_4_1_gold.pdf + * (chapter 4.18.2 NV-DDR) + * + * All these timings are expressed in picoseconds. + * + * @tBERS_max: Block erase time + * @tCCS_min: Change column setup time + * @tPROG_max: Page program time + * @tR_max: Page read time + * @tAC_min: Access window of DQ[7:0] from CLK + * @tAC_max: Access window of DQ[7:0] from CLK + * @tADL_min: ALE to data loading time + * @tCAD_min: Command, Address, Data delay + * @tCAH_min: Command/Address DQ hold time + * @tCALH_min: W/R_n, CLE and ALE hold time + * @tCALS_min: W/R_n, CLE and ALE setup time + * @tCAS_min: Command/address DQ setup time + * @tCEH_min: CE# high hold time + * @tCH_min: CE# hold time + * @tCK_min: Average clock cycle time + * @tCS_min: CE# setup time + * @tDH_min: Data hold time + * @tDQSCK_min: Start of the access window of DQS from CLK + * @tDQSCK_max: End of the access window of DQS from CLK + * @tDQSD_min: Min W/R_n low to DQS/DQ driven by device + * @tDQSD_max: Max W/R_n low to DQS/DQ driven by device + * @tDQSHZ_max: W/R_n high to DQS/DQ tri-state by device + * @tDQSQ_max: DQS-DQ skew, DQS to last DQ valid, per access + * @tDS_min: Data setup time + * @tDSC_min: DQS cycle time + * @tFEAT_max: Busy time for Set Features and Get Features + * @tITC_max: Interface and Timing Mode Change time + * @tQHS_max: Data hold skew factor + * @tRHW_min: Data output cycle to command, address, or data input cycle + * @tRR_min: Ready to RE# low (data only) + * @tRST_max: Device reset time, measured from the falling edge of R/B# to the + * rising edge of R/B#. + * @tWB_max: WE# high to SR[6] low + * @tWHR_min: WE# high to RE# low + * @tWRCK_min: W/R_n low to data output cycle + * @tWW_min: WP# transition to WE# low + */ +struct nand_nvddr_timings { + u64 tBERS_max; + u32 tCCS_min; + u64 tPROG_max; + u64 tR_max; + u32 tAC_min; + u32 tAC_max; + u32 tADL_min; + u32 tCAD_min; + u32 tCAH_min; + u32 tCALH_min; + u32 tCALS_min; + u32 tCAS_min; + u32 tCEH_min; + u32 tCH_min; + u32 tCK_min; + u32 tCS_min; + u32 tDH_min; + u32 tDQSCK_min; + u32 tDQSCK_max; + u32 tDQSD_min; + u32 tDQSD_max; + u32 tDQSHZ_max; + u32 tDQSQ_max; + u32 tDS_min; + u32 tDSC_min; + u32 tFEAT_max; + u32 tITC_max; + u32 tQHS_max; + u32 tRHW_min; + u32 tRR_min; + u32 tRST_max; + u32 tWB_max; + u32 tWHR_min; + u32 tWRCK_min; + u32 tWW_min; +}; + +/* + * While timings related to the data interface itself are mostly different + * between SDR and NV-DDR, timings related to the internal chip behavior are + * common. 
IOW, the following entries which describe the internal delays have + * the same definition and are shared in both SDR and NV-DDR timing structures: + * - tADL_min + * - tBERS_max + * - tCCS_min + * - tFEAT_max + * - tPROG_max + * - tR_max + * - tRR_min + * - tRST_max + * - tWB_max + * + * The below macros return the value of a given timing, no matter the interface. + */ +#define NAND_COMMON_TIMING_PS(conf, timing_name) \ + nand_interface_is_sdr(conf) ? \ + nand_get_sdr_timings(conf)->timing_name : \ + nand_get_nvddr_timings(conf)->timing_name + +#define NAND_COMMON_TIMING_MS(conf, timing_name) \ + PSEC_TO_MSEC(NAND_COMMON_TIMING_PS((conf), timing_name)) + +#define NAND_COMMON_TIMING_NS(conf, timing_name) \ + PSEC_TO_NSEC(NAND_COMMON_TIMING_PS((conf), timing_name)) + /** * enum nand_interface_type - NAND interface type * @NAND_SDR_IFACE: Single Data Rate interface + * @NAND_NVDDR_IFACE: Double Data Rate interface */ enum nand_interface_type { NAND_SDR_IFACE, + NAND_NVDDR_IFACE, }; /** @@ -485,6 +602,7 @@ enum nand_interface_type { * @timings: The timing information * @timings.mode: Timing mode as defined in the specification * @timings.sdr: Use it when @type is %NAND_SDR_IFACE. + * @timings.nvddr: Use it when @type is %NAND_NVDDR_IFACE. */ struct nand_interface_config { enum nand_interface_type type; @@ -492,10 +610,29 @@ struct nand_interface_config { unsigned int mode; union { struct nand_sdr_timings sdr; + struct nand_nvddr_timings nvddr; }; } timings; }; +/** + * nand_interface_is_sdr - get the interface type + * @conf: The data interface + */ +static bool nand_interface_is_sdr(const struct nand_interface_config *conf) +{ + return conf->type == NAND_SDR_IFACE; +} + +/** + * nand_interface_is_nvddr - get the interface type + * @conf: The data interface + */ +static bool nand_interface_is_nvddr(const struct nand_interface_config *conf) +{ + return conf->type == NAND_NVDDR_IFACE; +} + /** * nand_get_sdr_timings - get SDR timing from data interface * @conf: The data interface @@ -503,12 +640,25 @@ struct nand_interface_config { static inline const struct nand_sdr_timings * nand_get_sdr_timings(const struct nand_interface_config *conf) { - if (conf->type != NAND_SDR_IFACE) + if (!nand_interface_is_sdr(conf)) return ERR_PTR(-EINVAL); return &conf->timings.sdr; } +/** + * nand_get_nvddr_timings - get NV-DDR timing from data interface + * @conf: The data interface + */ +static inline const struct nand_nvddr_timings * +nand_get_nvddr_timings(const struct nand_interface_config *conf) +{ + if (!nand_interface_is_nvddr(conf)) + return ERR_PTR(-EINVAL); + + return &conf->timings.nvddr; +} + /** * struct nand_op_cmd_instr - Definition of a command instruction * @opcode: the command to issue in one cycle @@ -1413,7 +1563,6 @@ void nand_cleanup(struct nand_chip *chip); * instruction and have no physical pin to check it. 
*/ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms); -struct gpio_desc; int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod, unsigned long timeout_ms); @@ -1446,4 +1595,8 @@ static inline void *nand_get_data_buf(struct nand_chip *chip) return chip->data_buf; } +/* Parse the gpio-cs property */ +int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array, + unsigned int *ncs_array); + #endif /* __LINUX_MTD_RAWNAND_H */ diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 98ed91b529ea44..f67457748ed848 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -383,6 +383,7 @@ struct spi_nor_flash_parameter; * @read_proto: the SPI protocol for read operations * @write_proto: the SPI protocol for write operations * @reg_proto: the SPI protocol for read_reg/write_reg/erase operations + * @sfdp: the SFDP data of the flash * @controller_ops: SPI NOR controller driver specific operations. * @params: [FLASH-SPECIFIC] SPI NOR flash parameters and settings. * The structure includes legacy flash parameters and @@ -412,6 +413,7 @@ struct spi_nor { bool sst_write_second; u32 flags; enum spi_nor_cmd_ext cmd_ext_type; + struct sfdp *sfdp; const struct spi_nor_controller_ops *controller_ops; diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index e162b757b6d547..471cb7b9e89602 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -57,6 +57,7 @@ struct nvmem_keepout { * @type: Type of the nvmem storage * @read_only: Device is read-only. * @root_only: Device is accessibly to root only. + * @of_node: If given, this will be used instead of the parent's of_node. * @no_of_node: Device should not use the parent's of_node even if it's !NULL. * @reg_read: Callback to read data. * @reg_write: Callback to write data. @@ -86,6 +87,7 @@ struct nvmem_config { enum nvmem_type type; bool read_only; bool root_only; + struct device_node *of_node; bool no_of_node; nvmem_reg_read_t reg_read; nvmem_reg_write_t reg_write; diff --git a/include/linux/pl353-smc.h b/include/linux/pl353-smc.h deleted file mode 100644 index 0e0d3df9bf7243..00000000000000 --- a/include/linux/pl353-smc.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * ARM PL353 SMC Driver Header - * - * Copyright (C) 2012 - 2018 Xilinx, Inc - */ - -#ifndef __LINUX_PL353_SMC_H -#define __LINUX_PL353_SMC_H - -enum pl353_smc_ecc_mode { - PL353_SMC_ECCMODE_BYPASS = 0, - PL353_SMC_ECCMODE_APB = 1, - PL353_SMC_ECCMODE_MEM = 2 -}; - -enum pl353_smc_mem_width { - PL353_SMC_MEM_WIDTH_8 = 0, - PL353_SMC_MEM_WIDTH_16 = 1 -}; - -u32 pl353_smc_get_ecc_val(int ecc_reg); -bool pl353_smc_ecc_is_busy(void); -int pl353_smc_get_nand_int_status_raw(void); -void pl353_smc_clr_nand_int(void); -int pl353_smc_set_ecc_mode(enum pl353_smc_ecc_mode mode); -int pl353_smc_set_ecc_pg_size(unsigned int pg_sz); -int pl353_smc_set_buswidth(unsigned int bw); -void pl353_smc_set_cycles(u32 timings[]); -#endif
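The short sketches below are illustrative editor notes, not part of the diff above.

Consuming the new NV-DDR timing helpers: a controller driver's ->setup_interface()-style hook could branch on the interface type and read the timings common to SDR and NV-DDR through the NAND_COMMON_TIMING_*() macros added in this patch. This is a minimal sketch: my_ctrl_apply_timings(), the chosen timings and the pr_debug() traces are invented for illustration, and a real driver would program controller registers instead of printing.

#include <linux/kernel.h>
#include <linux/mtd/rawnand.h>

static int my_ctrl_apply_timings(const struct nand_interface_config *conf)
{
	/* These timings have the same meaning for SDR and NV-DDR. */
	u32 twb_ns = NAND_COMMON_TIMING_NS(conf, tWB_max);
	u32 tadl_ns = NAND_COMMON_TIMING_NS(conf, tADL_min);
	u64 tbers_ms = NAND_COMMON_TIMING_MS(conf, tBERS_max);

	if (nand_interface_is_nvddr(conf)) {
		const struct nand_nvddr_timings *t = nand_get_nvddr_timings(conf);

		/* NV-DDR-specific value: average clock cycle time, in ps. */
		pr_debug("NV-DDR mode %u, tCK_min=%u ps\n",
			 conf->timings.mode, t->tCK_min);
	} else {
		const struct nand_sdr_timings *t = nand_get_sdr_timings(conf);

		pr_debug("SDR mode %u, tRC_min=%u ps\n",
			 conf->timings.mode, t->tRC_min);
	}

	pr_debug("tWB=%u ns, tADL=%u ns, tBERS=%llu ms\n",
		 twb_ns, tadl_ns, (unsigned long long)tbers_ms);

	return 0;
}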
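Decoding the new ONFI definitions: the helpers below are a hypothetical sketch of how a caller might use ONFI_FEATURE_NV_DDR, the nvddr_timing_modes bitmask and ONFI_TIMING_MODE_PARAM(). The function names are invented and this is not how the NAND core itself is structured; the assumed feature-byte layout (mode number in bits 3:0, data interface selector in bits 5:4) matches the ONFI_DATA_INTERFACE_* and GENMASK(3, 0) definitions added above.

#include <asm/byteorder.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mtd/onfi.h>

/* Does the parameter page advertise NV-DDR support at all? */
static bool chip_supports_nvddr(const struct nand_onfi_params *p)
{
	return le16_to_cpu(p->features) & ONFI_FEATURE_NV_DDR;
}

/* Highest NV-DDR timing mode advertised in onfi_params (bit n => mode n). */
static int best_nvddr_mode(const struct onfi_params *onfi)
{
	if (!onfi->nvddr_timing_modes)
		return -EOPNOTSUPP;

	return fls(onfi->nvddr_timing_modes) - 1;
}

/* Decode one "Timing Mode" feature byte as returned by GET FEATURES. */
static void decode_timing_feature(u8 subfeature)
{
	unsigned int mode = ONFI_TIMING_MODE_PARAM(subfeature);
	bool nvddr = subfeature & ONFI_DATA_INTERFACE_NVDDR;

	pr_debug("timing mode %u (%s)\n", mode, nvddr ? "NV-DDR" : "SDR");
}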
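Using the new rawnand_dt_parse_gpio_cs() helper: a sketch of a controller probe collecting the GPIO-driven chip selects described in the device tree and keeping them for later die selection. The my_nfc structure and probe function are invented, and error handling is reduced to the minimum.

#include <linux/gpio/consumer.h>
#include <linux/mtd/rawnand.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_nfc {
	struct gpio_desc **cs_gpiods;	/* one descriptor per GPIO chip select */
	unsigned int ncs;
};

static int my_nfc_probe(struct platform_device *pdev)
{
	struct my_nfc *nfc;
	int ret;

	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
	if (!nfc)
		return -ENOMEM;

	/* Fill cs_gpiods/ncs from the DT description of the chip selects. */
	ret = rawnand_dt_parse_gpio_cs(&pdev->dev, &nfc->cs_gpiods, &nfc->ncs);
	if (ret)
		return ret;

	/*
	 * Later, when selecting die 'cs', the driver would assert the
	 * matching descriptor, e.g.:
	 *	gpiod_set_value_cansleep(nfc->cs_gpiods[cs], 1);
	 */
	return 0;
}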
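Using the new nvmem_config::of_node field: a sketch of a provider registering its cells against a dedicated child node instead of the parent device's of_node. The "my-otp" name, the 256-byte size and the callback wiring are invented for illustration.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>

static int my_otp_register(struct device *dev, struct device_node *otp_np,
			   nvmem_reg_read_t reg_read, void *priv)
{
	struct nvmem_config config = {
		.dev = dev,
		.name = "my-otp",
		.read_only = true,
		.word_size = 1,
		.stride = 1,
		.size = 256,
		.reg_read = reg_read,
		.priv = priv,
		/* Look up nvmem cells under otp_np, not under dev->of_node. */
		.of_node = otp_np,
	};
	struct nvmem_device *nvmem;

	nvmem = devm_nvmem_register(dev, &config);

	return PTR_ERR_OR_ZERO(nvmem);
}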